Did an svn copy of the release_33 branch of Clang.

llvm-svn: 189056
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/CMakeLists.txt b/safecode/tools/clang/lib/StaticAnalyzer/CMakeLists.txt
new file mode 100644
index 0000000..3d15092
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_subdirectory(Core)
+add_subdirectory(Checkers)
+add_subdirectory(Frontend)
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp
new file mode 100644
index 0000000..3dec8a5
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.cpp
@@ -0,0 +1,24 @@
+//=- AllocationDiagnostics.cpp - Config options for allocation diags *- C++ -*-//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the configuration functions for leaks/allocation diagnostics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "AllocationDiagnostics.h"
+
+namespace clang {
+namespace ento {
+
+bool shouldIncludeAllocationSiteInLeakDiagnostics(AnalyzerOptions &AOpts) {
+  return AOpts.getBooleanOption("leak-diagnostics-reference-allocation",
+                                false);
+}
+
+}}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
new file mode 100644
index 0000000..2b314a3
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/AllocationDiagnostics.h
@@ -0,0 +1,31 @@
+//=--- AllocationDiagnostics.h - Config options for allocation diags *- C++ -*-//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declares the configuration functions for leaks/allocation diagnostics.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_LIB_CHECKERS_ALLOC_DIAGS_H
+#define LLVM_CLANG_SA_LIB_CHECKERS_ALLOC_DIAGS_H
+
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+
+namespace clang { namespace ento {
+
+/// \brief Returns true if leak diagnostics should directly reference
+/// the allocation site (where possible).
+///
+/// The default is false.
+///
+bool shouldIncludeAllocationSiteInLeakDiagnostics(AnalyzerOptions &AOpts);
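+// (Illustrative note, not part of the original header: an option with this key
+// is typically enabled on the analyzer command line via
+// '-analyzer-config leak-diagnostics-reference-allocation=true'.)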
+
+}}
+
+#endif
+
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
new file mode 100644
index 0000000..9af0a5a
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
@@ -0,0 +1,140 @@
+//==--AnalyzerStatsChecker.cpp - Analyzer visitation statistics --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file reports various statistics about analyzer visitation.
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "StatsChecker"
+
+#include "ClangSACheckers.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+STATISTIC(NumBlocks,
+          "The # of blocks in top level functions");
+STATISTIC(NumBlocksUnreachable,
+          "The # of unreachable blocks in analyzing top level functions");
+
+namespace {
+class AnalyzerStatsChecker : public Checker<check::EndAnalysis> {
+public:
+  void checkEndAnalysis(ExplodedGraph &G, BugReporter &B,ExprEngine &Eng) const;
+};
+}
+
+void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
+                                            BugReporter &B,
+                                            ExprEngine &Eng) const {
+  const CFG *C  = 0;
+  const SourceManager &SM = B.getSourceManager();
+  llvm::SmallPtrSet<const CFGBlock*, 256> reachable;
+
+  // Root node should have the location context of the top most function.
+  const ExplodedNode *GraphRoot = *G.roots_begin();
+  const LocationContext *LC = GraphRoot->getLocation().getLocationContext();
+
+  const Decl *D = LC->getDecl();
+
+  // Iterate over the exploded graph.
+  for (ExplodedGraph::node_iterator I = G.nodes_begin();
+      I != G.nodes_end(); ++I) {
+    const ProgramPoint &P = I->getLocation();
+
+    // Only check the coverage in the top level function (optimization).
+    if (D != P.getLocationContext()->getDecl())
+      continue;
+
+    if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
+      const CFGBlock *CB = BE->getBlock();
+      reachable.insert(CB);
+    }
+  }
+
+  // Get the CFG and the Decl of this block.
+  C = LC->getCFG();
+
+  unsigned total = 0, unreachable = 0;
+
+  // Find CFGBlocks that were not covered by any node
+  for (CFG::const_iterator I = C->begin(); I != C->end(); ++I) {
+    const CFGBlock *CB = *I;
+    ++total;
+    // Check if the block is unreachable
+    if (!reachable.count(CB)) {
+      ++unreachable;
+    }
+  }
+
+  // We never 'reach' the entry block, so correct the unreachable count
+  unreachable--;
+  // There is also no BlockEntrance corresponding to the exit block, so
+  // consider it reached as well.
+  unreachable--;
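+  // (Illustrative: for a fully analyzed function whose CFG has 5 blocks, only
+  //  the entry and exit blocks are never 'entered', so total == 5 and
+  //  unreachable == 0 after the two corrections above.)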
+
+  // Generate the warning string
+  SmallString<128> buf;
+  llvm::raw_svector_ostream output(buf);
+  PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
+  if (!Loc.isValid())
+    return;
+
+  if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
+    const NamedDecl *ND = cast<NamedDecl>(D);
+    output << *ND;
+  }
+  else if (isa<BlockDecl>(D)) {
+    output << "block(line:" << Loc.getLine() << ":col:" << Loc.getColumn();
+  }
+  
+  NumBlocksUnreachable += unreachable;
+  NumBlocks += total;
+  std::string NameOfRootFunction = output.str();
+
+  output << " -> Total CFGBlocks: " << total << " | Unreachable CFGBlocks: "
+      << unreachable << " | Exhausted Block: "
+      << (Eng.wasBlocksExhausted() ? "yes" : "no")
+      << " | Empty WorkList: "
+      << (Eng.hasEmptyWorkList() ? "yes" : "no");
+
+  B.EmitBasicReport(D, "Analyzer Statistics", "Internal Statistics",
+                    output.str(), PathDiagnosticLocation(D, SM));
+
+  // Emit warning for each block we bailed out on.
+  typedef CoreEngine::BlocksExhausted::const_iterator ExhaustedIterator;
+  const CoreEngine &CE = Eng.getCoreEngine();
+  for (ExhaustedIterator I = CE.blocks_exhausted_begin(),
+      E = CE.blocks_exhausted_end(); I != E; ++I) {
+    const BlockEdge &BE =  I->first;
+    const CFGBlock *Exit = BE.getDst();
+    const CFGElement &CE = Exit->front();
+    if (Optional<CFGStmt> CS = CE.getAs<CFGStmt>()) {
+      SmallString<128> bufI;
+      llvm::raw_svector_ostream outputI(bufI);
+      outputI << "(" << NameOfRootFunction << ")" <<
+                 ": The analyzer generated a sink at this point";
+      B.EmitBasicReport(
+          D, "Sink Point", "Internal Statistics", outputI.str(),
+          PathDiagnosticLocation::createBegin(CS->getStmt(), SM, LC));
+    }
+  }
+}
+
+void ento::registerAnalyzerStatsChecker(CheckerManager &mgr) {
+  mgr.registerChecker<AnalyzerStatsChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
new file mode 100644
index 0000000..312bc74
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -0,0 +1,92 @@
+//== ArrayBoundChecker.cpp ------------------------------*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ArrayBoundChecker, a path-sensitive check that looks
+// for an out-of-bound array element access.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ArrayBoundChecker : 
+    public Checker<check::Location> {
+  mutable OwningPtr<BuiltinBug> BT;
+public:
+  void checkLocation(SVal l, bool isLoad, const Stmt* S,
+                     CheckerContext &C) const;
+};
+}
+
+void ArrayBoundChecker::checkLocation(SVal l, bool isLoad, const Stmt* LoadS,
+                                      CheckerContext &C) const {
+  // Check for out of bound array element access.
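+  // (Illustrative case reported below: 'int a[4]; ... a[i] = 1;' on a path
+  //  where 'i' is constrained to lie outside [0, 4).)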
+  const MemRegion *R = l.getAsRegion();
+  if (!R)
+    return;
+
+  const ElementRegion *ER = dyn_cast<ElementRegion>(R);
+  if (!ER)
+    return;
+
+  // Get the index of the accessed element.
+  DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
+
+  // A zero index is always in bounds; this also lets through ElementRegions
+  // created for pointer casts.
+  if (Idx.isZeroConstant())
+    return;
+
+  ProgramStateRef state = C.getState();
+
+  // Get the size of the array.
+  DefinedOrUnknownSVal NumElements 
+    = C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(), 
+                                            ER->getValueType());
+
+  ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
+  ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
+  if (StOutBound && !StInBound) {
+    ExplodedNode *N = C.generateSink(StOutBound);
+    if (!N)
+      return;
+  
+    if (!BT)
+      BT.reset(new BuiltinBug("Out-of-bound array access",
+                       "Access out-of-bound array element (buffer overflow)"));
+
+    // FIXME: It would be nice to eventually make this diagnostic more clear,
+    // e.g., by referencing the original declaration or by saying *why* this
+    // reference is outside the range.
+
+    // Generate a report for this bug.
+    BugReport *report = 
+      new BugReport(*BT, BT->getDescription(), N);
+
+    report->addRange(LoadS->getSourceRange());
+    C.emitReport(report);
+    return;
+  }
+  
+  // Array bound check succeeded.  From this point forward the array bound
+  // should always succeed.
+  C.addTransition(StInBound);
+}
+
+void ento::registerArrayBoundChecker(CheckerManager &mgr) {
+  mgr.registerChecker<ArrayBoundChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
new file mode 100644
index 0000000..5e4b824
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -0,0 +1,317 @@
+//== ArrayBoundCheckerV2.cpp ------------------------------------*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ArrayBoundCheckerV2, a path-sensitive check that looks
+// for an out-of-bound array element access.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ArrayBoundCheckerV2 : 
+    public Checker<check::Location> {
+  mutable OwningPtr<BuiltinBug> BT;
+      
+  enum OOB_Kind { OOB_Precedes, OOB_Excedes, OOB_Tainted };
+  
+  void reportOOB(CheckerContext &C, ProgramStateRef errorState,
+                 OOB_Kind kind) const;
+      
+public:
+  void checkLocation(SVal l, bool isLoad, const Stmt*S,
+                     CheckerContext &C) const;
+};
+
+// FIXME: Eventually replace RegionRawOffset with this class.
+class RegionRawOffsetV2 {
+private:
+  const SubRegion *baseRegion;
+  SVal byteOffset;
+  
+  RegionRawOffsetV2()
+    : baseRegion(0), byteOffset(UnknownVal()) {}
+
+public:
+  RegionRawOffsetV2(const SubRegion* base, SVal offset)
+    : baseRegion(base), byteOffset(offset) {}
+
+  NonLoc getByteOffset() const { return byteOffset.castAs<NonLoc>(); }
+  const SubRegion *getRegion() const { return baseRegion; }
+  
+  static RegionRawOffsetV2 computeOffset(ProgramStateRef state,
+                                         SValBuilder &svalBuilder,
+                                         SVal location);
+
+  void dump() const;
+  void dumpToStream(raw_ostream &os) const;
+};
+}
+
+static SVal computeExtentBegin(SValBuilder &svalBuilder, 
+                               const MemRegion *region) {
+  while (true)
+    switch (region->getKind()) {
+      default:
+        return svalBuilder.makeZeroArrayIndex();        
+      case MemRegion::SymbolicRegionKind:
+        // FIXME: improve this later by tracking symbolic lower bounds
+        // for symbolic regions.
+        return UnknownVal();
+      case MemRegion::ElementRegionKind:
+        region = cast<SubRegion>(region)->getSuperRegion();
+        continue;
+    }
+}
+
+void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
+                                        const Stmt* LoadS,
+                                        CheckerContext &checkerContext) const {
+
+  // NOTE: Instead of using ProgramState::assumeInBound(), we are prototyping
+  // some new logic here that reasons directly about memory region extents.
+  // Once that logic is more mature, we can bring it back to assumeInBound()
+  // for all clients to use.
+  //
+  // The algorithm we are using here for bounds checking is to see if the
+  // memory access is within the extent of the base region.  Since we
+  // have some flexibility in defining the base region, we can achieve
+  // various levels of conservatism in our buffer overflow checking.
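+  //
+  // (Illustrative, hypothetical case: for 'char buf[16]; buf[i] = 0;' the raw
+  //  offset is {buf, i * 1}; a report is emitted when the constraints imply
+  //  that the byte offset precedes 0 or reaches the 16-byte extent of 'buf'.)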
+  ProgramStateRef state = checkerContext.getState();  
+  ProgramStateRef originalState = state;
+
+  SValBuilder &svalBuilder = checkerContext.getSValBuilder();
+  const RegionRawOffsetV2 &rawOffset = 
+    RegionRawOffsetV2::computeOffset(state, svalBuilder, location);
+
+  if (!rawOffset.getRegion())
+    return;
+
+  // CHECK LOWER BOUND: Is byteOffset < extent begin?  
+  //  If so, we are doing a load/store
+  //  before the first valid offset in the memory region.
+
+  SVal extentBegin = computeExtentBegin(svalBuilder, rawOffset.getRegion());
+  
+  if (Optional<NonLoc> NV = extentBegin.getAs<NonLoc>()) {
+    SVal lowerBound =
+        svalBuilder.evalBinOpNN(state, BO_LT, rawOffset.getByteOffset(), *NV,
+                                svalBuilder.getConditionType());
+
+    Optional<NonLoc> lowerBoundToCheck = lowerBound.getAs<NonLoc>();
+    if (!lowerBoundToCheck)
+      return;
+    
+    ProgramStateRef state_precedesLowerBound, state_withinLowerBound;
+    llvm::tie(state_precedesLowerBound, state_withinLowerBound) =
+      state->assume(*lowerBoundToCheck);
+
+    // Are we constrained enough to definitely precede the lower bound?
+    if (state_precedesLowerBound && !state_withinLowerBound) {
+      reportOOB(checkerContext, state_precedesLowerBound, OOB_Precedes);
+      return;
+    }
+  
+    // Otherwise, assume the constraint of the lower bound.
+    assert(state_withinLowerBound);
+    state = state_withinLowerBound;
+  }
+  
+  do {
+    // CHECK UPPER BOUND: Is byteOffset >= extent(baseRegion)?  If so,
+    // we are doing a load/store after the last valid offset.
+    DefinedOrUnknownSVal extentVal =
+      rawOffset.getRegion()->getExtent(svalBuilder);
+    if (!extentVal.getAs<NonLoc>())
+      break;
+
+    SVal upperbound
+      = svalBuilder.evalBinOpNN(state, BO_GE, rawOffset.getByteOffset(),
+                                extentVal.castAs<NonLoc>(),
+                                svalBuilder.getConditionType());
+  
+    Optional<NonLoc> upperboundToCheck = upperbound.getAs<NonLoc>();
+    if (!upperboundToCheck)
+      break;
+  
+    ProgramStateRef state_exceedsUpperBound, state_withinUpperBound;
+    llvm::tie(state_exceedsUpperBound, state_withinUpperBound) =
+      state->assume(*upperboundToCheck);
+
+    // If we are underconstrained and the index variables are tainted, report.
+    if (state_exceedsUpperBound && state_withinUpperBound) {
+      if (state->isTainted(rawOffset.getByteOffset()))
+        reportOOB(checkerContext, state_exceedsUpperBound, OOB_Tainted);
+      return;
+    }
+  
+    // If we are constrained enough to definitely exceed the upper bound, report.
+    if (state_exceedsUpperBound) {
+      assert(!state_withinUpperBound);
+      reportOOB(checkerContext, state_exceedsUpperBound, OOB_Excedes);
+      return;
+    }
+  
+    assert(state_withinUpperBound);
+    state = state_withinUpperBound;
+  }
+  while (false);
+  
+  if (state != originalState)
+    checkerContext.addTransition(state);
+}
+
+void ArrayBoundCheckerV2::reportOOB(CheckerContext &checkerContext,
+                                    ProgramStateRef errorState,
+                                    OOB_Kind kind) const {
+  
+  ExplodedNode *errorNode = checkerContext.generateSink(errorState);
+  if (!errorNode)
+    return;
+
+  if (!BT)
+    BT.reset(new BuiltinBug("Out-of-bound access"));
+
+  // FIXME: These diagnostics are preliminary.  We should get far better
+  // diagnostics for explaining buffer overruns.
+
+  SmallString<256> buf;
+  llvm::raw_svector_ostream os(buf);
+  os << "Out of bound memory access ";
+  switch (kind) {
+  case OOB_Precedes:
+    os << "(accessed memory precedes memory block)";
+    break;
+  case OOB_Excedes:
+    os << "(access exceeds upper limit of memory block)";
+    break;
+  case OOB_Tainted:
+    os << "(index is tainted)";
+    break;
+  }
+
+  checkerContext.emitReport(new BugReport(*BT, os.str(), errorNode));
+}
+
+void RegionRawOffsetV2::dump() const {
+  dumpToStream(llvm::errs());
+}
+
+void RegionRawOffsetV2::dumpToStream(raw_ostream &os) const {
+  os << "raw_offset_v2{" << getRegion() << ',' << getByteOffset() << '}';
+}
+
+// FIXME: Merge with the implementation of the same method in Store.cpp
+static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
+  if (const RecordType *RT = Ty->getAs<RecordType>()) {
+    const RecordDecl *D = RT->getDecl();
+    if (!D->getDefinition())
+      return false;
+  }
+
+  return true;
+}
+
+
+// Lazily computes a value to be used by 'computeOffset'.  If 'val'
+// is undefined, we lazily substitute '0'.  Otherwise,
+// return 'val'.
+static inline SVal getValue(SVal val, SValBuilder &svalBuilder) {
+  return val.getAs<UndefinedVal>() ? svalBuilder.makeArrayIndex(0) : val;
+}
+
+// Scale a base value by a scaling factor, and return the scaled
+// value as an SVal.  Used by 'computeOffset'.
+static inline SVal scaleValue(ProgramStateRef state,
+                              NonLoc baseVal, CharUnits scaling,
+                              SValBuilder &sb) {
+  return sb.evalBinOpNN(state, BO_Mul, baseVal,
+                        sb.makeArrayIndex(scaling.getQuantity()),
+                        sb.getArrayIndexType());
+}
+
+// Add an SVal to another, treating unknown and undefined values as
+// summing to UnknownVal.  Used by 'computeOffset'.
+static SVal addValue(ProgramStateRef state, SVal x, SVal y,
+                     SValBuilder &svalBuilder) {
+  // We treat UnknownVals and UndefinedVals the same here because we
+  // only care about computing offsets.
+  if (x.isUnknownOrUndef() || y.isUnknownOrUndef())
+    return UnknownVal();
+
+  return svalBuilder.evalBinOpNN(state, BO_Add, x.castAs<NonLoc>(),
+                                 y.castAs<NonLoc>(),
+                                 svalBuilder.getArrayIndexType());
+}
+
+/// Compute a raw byte offset from a base region.  Used for array bounds
+/// checking.
+RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(ProgramStateRef state,
+                                                   SValBuilder &svalBuilder,
+                                                   SVal location)
+{
+  const MemRegion *region = location.getAsRegion();
+  SVal offset = UndefinedVal();
+  
+  while (region) {
+    switch (region->getKind()) {
+      default: {
+        if (const SubRegion *subReg = dyn_cast<SubRegion>(region)) {
+          offset = getValue(offset, svalBuilder);
+          if (!offset.isUnknownOrUndef())
+            return RegionRawOffsetV2(subReg, offset);
+        }
+        return RegionRawOffsetV2();
+      }
+      case MemRegion::ElementRegionKind: {
+        const ElementRegion *elemReg = cast<ElementRegion>(region);
+        SVal index = elemReg->getIndex();
+        if (!index.getAs<NonLoc>())
+          return RegionRawOffsetV2();
+        QualType elemType = elemReg->getElementType();
+        // If the element is an incomplete type, go no further.
+        ASTContext &astContext = svalBuilder.getContext();
+        if (!IsCompleteType(astContext, elemType))
+          return RegionRawOffsetV2();
+        
+        // Update the offset.
+        offset = addValue(state,
+                          getValue(offset, svalBuilder),
+                          scaleValue(state,
+                                     index.castAs<NonLoc>(),
+                                     astContext.getTypeSizeInChars(elemType),
+                                     svalBuilder),
+                          svalBuilder);
+
+        if (offset.isUnknownOrUndef())
+          return RegionRawOffsetV2();
+
+        region = elemReg->getSuperRegion();
+        continue;
+      }
+    }
+  }
+  return RegionRawOffsetV2();
+}
+
+
+void ento::registerArrayBoundCheckerV2(CheckerManager &mgr) {
+  mgr.registerChecker<ArrayBoundCheckerV2>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
new file mode 100644
index 0000000..fba14a0
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -0,0 +1,937 @@
+//== BasicObjCFoundationChecks.cpp - Simple Apple-Foundation checks -*- C++ -*--
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines BasicObjCFoundationChecks, a class that encapsulates
+//  a set of simple checks to run on Objective-C code using Apple's Foundation
+//  classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class APIMisuse : public BugType {
+public:
+  APIMisuse(const char* name) : BugType(name, "API Misuse (Apple)") {}
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+static StringRef GetReceiverInterfaceName(const ObjCMethodCall &msg) {
+  if (const ObjCInterfaceDecl *ID = msg.getReceiverInterface())
+    return ID->getIdentifier()->getName();
+  return StringRef();
+}
+
+enum FoundationClass {
+  FC_None,
+  FC_NSArray,
+  FC_NSDictionary,
+  FC_NSEnumerator,
+  FC_NSOrderedSet,
+  FC_NSSet,
+  FC_NSString
+};
+
+static FoundationClass findKnownClass(const ObjCInterfaceDecl *ID) {
+  static llvm::StringMap<FoundationClass> Classes;
+  if (Classes.empty()) {
+    Classes["NSArray"] = FC_NSArray;
+    Classes["NSDictionary"] = FC_NSDictionary;
+    Classes["NSEnumerator"] = FC_NSEnumerator;
+    Classes["NSOrderedSet"] = FC_NSOrderedSet;
+    Classes["NSSet"] = FC_NSSet;
+    Classes["NSString"] = FC_NSString;
+  }
+
+  // FIXME: Should we cache this at all?
+  FoundationClass result = Classes.lookup(ID->getIdentifier()->getName());
+  if (result == FC_None)
+    if (const ObjCInterfaceDecl *Super = ID->getSuperClass())
+      return findKnownClass(Super);
+
+  return result;
+}
+
+//===----------------------------------------------------------------------===//
+// NilArgChecker - Check for prohibited nil arguments to ObjC method calls.
+//===----------------------------------------------------------------------===//
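+// (Illustrative examples reported by this checker: '[array addObject:nil]', or
+//  'dict[key] = nil', which lowers to 'setObject:forKeyedSubscript:'.)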
+
+namespace {
+  class NilArgChecker : public Checker<check::PreObjCMessage> {
+    mutable OwningPtr<APIMisuse> BT;
+
+    void WarnIfNilArg(CheckerContext &C,
+                    const ObjCMethodCall &msg, unsigned Arg,
+                    FoundationClass Class,
+                    bool CanBeSubscript = false) const;
+
+  public:
+    void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+  };
+}
+
+void NilArgChecker::WarnIfNilArg(CheckerContext &C,
+                                 const ObjCMethodCall &msg,
+                                 unsigned int Arg,
+                                 FoundationClass Class,
+                                 bool CanBeSubscript) const {
+  // Check if the argument is nil.
+  ProgramStateRef State = C.getState();
+  if (!State->isNull(msg.getArgSVal(Arg)).isConstrainedTrue())
+      return;
+      
+  if (!BT)
+    BT.reset(new APIMisuse("nil argument"));
+
+  if (ExplodedNode *N = C.generateSink()) {
+    SmallString<128> sbuf;
+    llvm::raw_svector_ostream os(sbuf);
+
+    if (CanBeSubscript && msg.getMessageKind() == OCM_Subscript) {
+
+      if (Class == FC_NSArray) {
+        os << "Array element cannot be nil";
+      } else if (Class == FC_NSDictionary) {
+        if (Arg == 0) {
+          os << "Value stored into '";
+          os << GetReceiverInterfaceName(msg) << "' cannot be nil";
+        } else {
+          assert(Arg == 1);
+          os << "'"<< GetReceiverInterfaceName(msg) << "' key cannot be nil";
+        }
+      } else
+        llvm_unreachable("Missing foundation class for the subscript expr");
+
+    } else {
+      if (Class == FC_NSDictionary) {
+        if (Arg == 0)
+          os << "Value argument ";
+        else {
+          assert(Arg == 1);
+          os << "Key argument ";
+        }
+        os << "to '" << msg.getSelector().getAsString() << "' cannot be nil";
+      } else {
+        os << "Argument to '" << GetReceiverInterfaceName(msg) << "' method '"
+        << msg.getSelector().getAsString() << "' cannot be nil";
+      }
+    }
+
+    BugReport *R = new BugReport(*BT, os.str(), N);
+    R->addRange(msg.getArgSourceRange(Arg));
+    bugreporter::trackNullOrUndefValue(N, msg.getArgExpr(Arg), *R);
+    C.emitReport(R);
+  }
+}
+
+void NilArgChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
+                                        CheckerContext &C) const {
+  const ObjCInterfaceDecl *ID = msg.getReceiverInterface();
+  if (!ID)
+    return;
+
+  FoundationClass Class = findKnownClass(ID);
+
+  static const unsigned InvalidArgIndex = UINT_MAX;
+  unsigned Arg = InvalidArgIndex;
+  bool CanBeSubscript = false;
+  
+  if (Class == FC_NSString) {
+    Selector S = msg.getSelector();
+    
+    if (S.isUnarySelector())
+      return;
+    
+    // FIXME: This is going to be really slow doing these checks with
+    //  lexical comparisons.
+    
+    std::string NameStr = S.getAsString();
+    StringRef Name(NameStr);
+    assert(!Name.empty());
+    
+    // FIXME: Checking for initWithFormat: will not work in most cases
+    //  yet because [NSString alloc] returns id, not NSString*.  We will
+    //  need support for tracking expected-type information in the analyzer
+    //  to find these errors.
+    if (Name == "caseInsensitiveCompare:" ||
+        Name == "compare:" ||
+        Name == "compare:options:" ||
+        Name == "compare:options:range:" ||
+        Name == "compare:options:range:locale:" ||
+        Name == "componentsSeparatedByCharactersInSet:" ||
+        Name == "initWithFormat:") {
+      Arg = 0;
+    }
+  } else if (Class == FC_NSArray) {
+    Selector S = msg.getSelector();
+
+    if (S.isUnarySelector())
+      return;
+
+    if (S.getNameForSlot(0).equals("addObject")) {
+      Arg = 0;
+    } else if (S.getNameForSlot(0).equals("insertObject") &&
+               S.getNameForSlot(1).equals("atIndex")) {
+      Arg = 0;
+    } else if (S.getNameForSlot(0).equals("replaceObjectAtIndex") &&
+               S.getNameForSlot(1).equals("withObject")) {
+      Arg = 1;
+    } else if (S.getNameForSlot(0).equals("setObject") &&
+               S.getNameForSlot(1).equals("atIndexedSubscript")) {
+      Arg = 0;
+      CanBeSubscript = true;
+    } else if (S.getNameForSlot(0).equals("arrayByAddingObject")) {
+      Arg = 0;
+    }
+  } else if (Class == FC_NSDictionary) {
+    Selector S = msg.getSelector();
+
+    if (S.isUnarySelector())
+      return;
+
+    if (S.getNameForSlot(0).equals("dictionaryWithObject") &&
+        S.getNameForSlot(1).equals("forKey")) {
+      Arg = 0;
+      WarnIfNilArg(C, msg, /* Arg */1, Class);
+    } else if (S.getNameForSlot(0).equals("setObject") &&
+               S.getNameForSlot(1).equals("forKey")) {
+      Arg = 0;
+      WarnIfNilArg(C, msg, /* Arg */1, Class);
+    } else if (S.getNameForSlot(0).equals("setObject") &&
+               S.getNameForSlot(1).equals("forKeyedSubscript")) {
+      CanBeSubscript = true;
+      Arg = 0;
+      WarnIfNilArg(C, msg, /* Arg */1, Class, CanBeSubscript);
+    } else if (S.getNameForSlot(0).equals("removeObjectForKey")) {
+      Arg = 0;
+    }
+  }
+
+
+  // If we identified an argument to check, warn if it is nil.
+  if (Arg != InvalidArgIndex)
+    WarnIfNilArg(C, msg, Arg, Class, CanBeSubscript);
+
+}
+
+//===----------------------------------------------------------------------===//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFNumberCreateChecker : public Checker< check::PreStmt<CallExpr> > {
+  mutable OwningPtr<APIMisuse> BT;
+  mutable IdentifierInfo* II;
+public:
+  CFNumberCreateChecker() : II(0) {}
+
+  void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+private:
+  void EmitError(const TypedRegion* R, const Expr *Ex,
+                uint64_t SourceSize, uint64_t TargetSize, uint64_t NumberKind);
+};
+} // end anonymous namespace
+
+enum CFNumberType {
+  kCFNumberSInt8Type = 1,
+  kCFNumberSInt16Type = 2,
+  kCFNumberSInt32Type = 3,
+  kCFNumberSInt64Type = 4,
+  kCFNumberFloat32Type = 5,
+  kCFNumberFloat64Type = 6,
+  kCFNumberCharType = 7,
+  kCFNumberShortType = 8,
+  kCFNumberIntType = 9,
+  kCFNumberLongType = 10,
+  kCFNumberLongLongType = 11,
+  kCFNumberFloatType = 12,
+  kCFNumberDoubleType = 13,
+  kCFNumberCFIndexType = 14,
+  kCFNumberNSIntegerType = 15,
+  kCFNumberCGFloatType = 16
+};
+
+static Optional<uint64_t> GetCFNumberSize(ASTContext &Ctx, uint64_t i) {
+  static const unsigned char FixedSize[] = { 8, 16, 32, 64, 32, 64 };
+
+  if (i < kCFNumberCharType)
+    return FixedSize[i-1];
+
+  QualType T;
+
+  switch (i) {
+    case kCFNumberCharType:     T = Ctx.CharTy;     break;
+    case kCFNumberShortType:    T = Ctx.ShortTy;    break;
+    case kCFNumberIntType:      T = Ctx.IntTy;      break;
+    case kCFNumberLongType:     T = Ctx.LongTy;     break;
+    case kCFNumberLongLongType: T = Ctx.LongLongTy; break;
+    case kCFNumberFloatType:    T = Ctx.FloatTy;    break;
+    case kCFNumberDoubleType:   T = Ctx.DoubleTy;   break;
+    case kCFNumberCFIndexType:
+    case kCFNumberNSIntegerType:
+    case kCFNumberCGFloatType:
+      // FIXME: We need a way to map from names to Type*.
+    default:
+      return None;
+  }
+
+  return Ctx.getTypeSize(T);
+}
+
+#if 0
+static const char* GetCFNumberTypeStr(uint64_t i) {
+  static const char* Names[] = {
+    "kCFNumberSInt8Type",
+    "kCFNumberSInt16Type",
+    "kCFNumberSInt32Type",
+    "kCFNumberSInt64Type",
+    "kCFNumberFloat32Type",
+    "kCFNumberFloat64Type",
+    "kCFNumberCharType",
+    "kCFNumberShortType",
+    "kCFNumberIntType",
+    "kCFNumberLongType",
+    "kCFNumberLongLongType",
+    "kCFNumberFloatType",
+    "kCFNumberDoubleType",
+    "kCFNumberCFIndexType",
+    "kCFNumberNSIntegerType",
+    "kCFNumberCGFloatType"
+  };
+
+  return i <= kCFNumberCGFloatType ? Names[i-1] : "Invalid CFNumberType";
+}
+#endif
+
+void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
+                                         CheckerContext &C) const {
+  ProgramStateRef state = C.getState();
+  const FunctionDecl *FD = C.getCalleeDecl(CE);
+  if (!FD)
+    return;
+  
+  ASTContext &Ctx = C.getASTContext();
+  if (!II)
+    II = &Ctx.Idents.get("CFNumberCreate");
+
+  if (FD->getIdentifier() != II || CE->getNumArgs() != 3)
+    return;
+
+  // Get the value of the "theType" argument.
+  const LocationContext *LCtx = C.getLocationContext();
+  SVal TheTypeVal = state->getSVal(CE->getArg(1), LCtx);
+
+  // FIXME: We really should allow ranges of valid theType values, and
+  //   bifurcate the state appropriately.
+  Optional<nonloc::ConcreteInt> V = TheTypeVal.getAs<nonloc::ConcreteInt>();
+  if (!V)
+    return;
+
+  uint64_t NumberKind = V->getValue().getLimitedValue();
+  Optional<uint64_t> OptTargetSize = GetCFNumberSize(Ctx, NumberKind);
+
+  // FIXME: In some cases we can emit an error.
+  if (!OptTargetSize)
+    return;
+
+  uint64_t TargetSize = *OptTargetSize;
+
+  // Look at the value of the integer being passed by reference.  Essentially
+  // we want to catch cases where the value passed in is not equal to the
+  // size of the type being created.
+  SVal TheValueExpr = state->getSVal(CE->getArg(2), LCtx);
+
+  // FIXME: Eventually we should handle arbitrary locations.  We can do this
+  //  by having an enhanced memory model that does low-level typing.
+  Optional<loc::MemRegionVal> LV = TheValueExpr.getAs<loc::MemRegionVal>();
+  if (!LV)
+    return;
+
+  const TypedValueRegion* R = dyn_cast<TypedValueRegion>(LV->stripCasts());
+  if (!R)
+    return;
+
+  QualType T = Ctx.getCanonicalType(R->getValueType());
+
+  // FIXME: If the pointee isn't an integer type, should we flag a warning?
+  //  People can do weird stuff with pointers.
+
+  if (!T->isIntegralOrEnumerationType())
+    return;
+
+  uint64_t SourceSize = Ctx.getTypeSize(T);
+
+  // CHECK: is SourceSize == TargetSize
+  if (SourceSize == TargetSize)
+    return;
+
+  // Generate an error.  Only generate a sink if 'SourceSize < TargetSize';
+  // otherwise generate a regular node.
+  //
+  // FIXME: We can actually create an abstract "CFNumber" object that has
+  //  the bits initialized to the provided values.
+  //
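+  // (Illustrative, hypothetical example: 'short x; CFNumberCreate(NULL,
+  //  kCFNumberSInt64Type, &x);' reads 64 bits from a 16-bit object, so the
+  //  smaller-than-target case is a sink; the opposite direction merely
+  //  truncates, and exploration continues from a regular node.)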
+  if (ExplodedNode *N = SourceSize < TargetSize ? C.generateSink() 
+                                                : C.addTransition()) {
+    SmallString<128> sbuf;
+    llvm::raw_svector_ostream os(sbuf);
+    
+    os << (SourceSize == 8 ? "An " : "A ")
+       << SourceSize << " bit integer is used to initialize a CFNumber "
+                        "object that represents "
+       << (TargetSize == 8 ? "an " : "a ")
+       << TargetSize << " bit integer. ";
+    
+    if (SourceSize < TargetSize)
+      os << (TargetSize - SourceSize)
+         << " bits of the CFNumber value will be garbage.";
+    else
+      os << (SourceSize - TargetSize)
+         << " bits of the input integer will be lost.";
+
+    if (!BT)
+      BT.reset(new APIMisuse("Bad use of CFNumberCreate"));
+    
+    BugReport *report = new BugReport(*BT, os.str(), N);
+    report->addRange(CE->getArg(2)->getSourceRange());
+    C.emitReport(report);
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// CFRetain/CFRelease/CFMakeCollectable checking for null arguments.
+//===----------------------------------------------------------------------===//
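+// (Illustrative: a call such as 'CFRelease(NULL)' is reported below, since
+//  these functions require a non-null argument.)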
+
+namespace {
+class CFRetainReleaseChecker : public Checker< check::PreStmt<CallExpr> > {
+  mutable OwningPtr<APIMisuse> BT;
+  mutable IdentifierInfo *Retain, *Release, *MakeCollectable;
+public:
+  CFRetainReleaseChecker(): Retain(0), Release(0), MakeCollectable(0) {}
+  void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+
+void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
+                                          CheckerContext &C) const {
+  // If the CallExpr doesn't have exactly 1 argument, just give up checking.
+  if (CE->getNumArgs() != 1)
+    return;
+
+  ProgramStateRef state = C.getState();
+  const FunctionDecl *FD = C.getCalleeDecl(CE);
+  if (!FD)
+    return;
+  
+  if (!BT) {
+    ASTContext &Ctx = C.getASTContext();
+    Retain = &Ctx.Idents.get("CFRetain");
+    Release = &Ctx.Idents.get("CFRelease");
+    MakeCollectable = &Ctx.Idents.get("CFMakeCollectable");
+    BT.reset(
+      new APIMisuse("null passed to CFRetain/CFRelease/CFMakeCollectable"));
+  }
+
+  // Check if we called CFRetain/CFRelease/CFMakeCollectable.
+  const IdentifierInfo *FuncII = FD->getIdentifier();
+  if (!(FuncII == Retain || FuncII == Release || FuncII == MakeCollectable))
+    return;
+
+  // FIXME: The rest of this just checks that the argument is non-null.
+  // It should probably be refactored and combined with NonNullParamChecker.
+
+  // Get the argument's value.
+  const Expr *Arg = CE->getArg(0);
+  SVal ArgVal = state->getSVal(Arg, C.getLocationContext());
+  Optional<DefinedSVal> DefArgVal = ArgVal.getAs<DefinedSVal>();
+  if (!DefArgVal)
+    return;
+
+  // Get a NULL value.
+  SValBuilder &svalBuilder = C.getSValBuilder();
+  DefinedSVal zero =
+      svalBuilder.makeZeroVal(Arg->getType()).castAs<DefinedSVal>();
+
+  // Make an expression asserting that they're equal.
+  DefinedOrUnknownSVal ArgIsNull = svalBuilder.evalEQ(state, zero, *DefArgVal);
+
+  // Are they equal?
+  ProgramStateRef stateTrue, stateFalse;
+  llvm::tie(stateTrue, stateFalse) = state->assume(ArgIsNull);
+
+  if (stateTrue && !stateFalse) {
+    ExplodedNode *N = C.generateSink(stateTrue);
+    if (!N)
+      return;
+
+    const char *description;
+    if (FuncII == Retain)
+      description = "Null pointer argument in call to CFRetain";
+    else if (FuncII == Release)
+      description = "Null pointer argument in call to CFRelease";
+    else if (FuncII == MakeCollectable)
+      description = "Null pointer argument in call to CFMakeCollectable";
+    else
+      llvm_unreachable("impossible case");
+
+    BugReport *report = new BugReport(*BT, description, N);
+    report->addRange(Arg->getSourceRange());
+    bugreporter::trackNullOrUndefValue(N, Arg, *report);
+    C.emitReport(report);
+    return;
+  }
+
+  // From here on, we know the argument is non-null.
+  C.addTransition(stateFalse);
+}
+
+//===----------------------------------------------------------------------===//
+// Check for sending 'retain', 'release', or 'autorelease' directly to a Class.
+//===----------------------------------------------------------------------===//
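+// (Illustrative: '[NSString release]' sends 'release' to the class object and
+//  is flagged; '[myString release]', sent to an instance, is not.)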
+
+namespace {
+class ClassReleaseChecker : public Checker<check::PreObjCMessage> {
+  mutable Selector releaseS;
+  mutable Selector retainS;
+  mutable Selector autoreleaseS;
+  mutable Selector drainS;
+  mutable OwningPtr<BugType> BT;
+
+public:
+  void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+};
+}
+
+void ClassReleaseChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
+                                              CheckerContext &C) const {
+  
+  if (!BT) {
+    BT.reset(new APIMisuse("message incorrectly sent to class instead of class "
+                           "instance"));
+  
+    ASTContext &Ctx = C.getASTContext();
+    releaseS = GetNullarySelector("release", Ctx);
+    retainS = GetNullarySelector("retain", Ctx);
+    autoreleaseS = GetNullarySelector("autorelease", Ctx);
+    drainS = GetNullarySelector("drain", Ctx);
+  }
+  
+  if (msg.isInstanceMessage())
+    return;
+  const ObjCInterfaceDecl *Class = msg.getReceiverInterface();
+  assert(Class);
+
+  Selector S = msg.getSelector();
+  if (!(S == releaseS || S == retainS || S == autoreleaseS || S == drainS))
+    return;
+  
+  if (ExplodedNode *N = C.addTransition()) {
+    SmallString<200> buf;
+    llvm::raw_svector_ostream os(buf);
+
+    os << "The '" << S.getAsString() << "' message should be sent to instances "
+          "of class '" << Class->getName()
+       << "' and not the class directly";
+  
+    BugReport *report = new BugReport(*BT, os.str(), N);
+    report->addRange(msg.getSourceRange());
+    C.emitReport(report);
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Check for passing non-Objective-C types to variadic methods that expect
+// only Objective-C types.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VariadicMethodTypeChecker : public Checker<check::PreObjCMessage> {
+  mutable Selector arrayWithObjectsS;
+  mutable Selector dictionaryWithObjectsAndKeysS;
+  mutable Selector setWithObjectsS;
+  mutable Selector orderedSetWithObjectsS;
+  mutable Selector initWithObjectsS;
+  mutable Selector initWithObjectsAndKeysS;
+  mutable OwningPtr<BugType> BT;
+
+  bool isVariadicMessage(const ObjCMethodCall &msg) const;
+
+public:
+  void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+};
+}
+
+/// isVariadicMessage - Returns whether the given message is a variadic message,
+/// where all arguments must be Objective-C types.
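+/// (Illustrative: '[NSArray arrayWithObjects:a, b, nil]' and
+/// '[[NSDictionary alloc] initWithObjectsAndKeys:v1, k1, nil]' are such
+/// messages.)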
+bool
+VariadicMethodTypeChecker::isVariadicMessage(const ObjCMethodCall &msg) const {
+  const ObjCMethodDecl *MD = msg.getDecl();
+  
+  if (!MD || !MD->isVariadic() || isa<ObjCProtocolDecl>(MD->getDeclContext()))
+    return false;
+  
+  Selector S = msg.getSelector();
+  
+  if (msg.isInstanceMessage()) {
+    // FIXME: Ideally we'd look at the receiver interface here, but that's not
+    // useful for init, because alloc returns 'id'. In theory, this could lead
+    // to false positives, for example if there existed a class that had an
+    // initWithObjects: implementation that does accept non-Objective-C pointer
+    // types, but the chance of that happening is pretty small compared to the
+    // gains that this analysis gives.
+    const ObjCInterfaceDecl *Class = MD->getClassInterface();
+
+    switch (findKnownClass(Class)) {
+    case FC_NSArray:
+    case FC_NSOrderedSet:
+    case FC_NSSet:
+      return S == initWithObjectsS;
+    case FC_NSDictionary:
+      return S == initWithObjectsAndKeysS;
+    default:
+      return false;
+    }
+  } else {
+    const ObjCInterfaceDecl *Class = msg.getReceiverInterface();
+
+    switch (findKnownClass(Class)) {
+      case FC_NSArray:
+        return S == arrayWithObjectsS;
+      case FC_NSOrderedSet:
+        return S == orderedSetWithObjectsS;
+      case FC_NSSet:
+        return S == setWithObjectsS;
+      case FC_NSDictionary:
+        return S == dictionaryWithObjectsAndKeysS;
+      default:
+        return false;
+    }
+  }
+}
+
+void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
+                                                    CheckerContext &C) const {
+  if (!BT) {
+    BT.reset(new APIMisuse("Arguments passed to variadic method aren't all "
+                           "Objective-C pointer types"));
+
+    ASTContext &Ctx = C.getASTContext();
+    arrayWithObjectsS = GetUnarySelector("arrayWithObjects", Ctx);
+    dictionaryWithObjectsAndKeysS = 
+      GetUnarySelector("dictionaryWithObjectsAndKeys", Ctx);
+    setWithObjectsS = GetUnarySelector("setWithObjects", Ctx);
+    orderedSetWithObjectsS = GetUnarySelector("orderedSetWithObjects", Ctx);
+
+    initWithObjectsS = GetUnarySelector("initWithObjects", Ctx);
+    initWithObjectsAndKeysS = GetUnarySelector("initWithObjectsAndKeys", Ctx);
+  }
+
+  if (!isVariadicMessage(msg))
+      return;
+
+  // We are not interested in the selector arguments since they have
+  // well-defined types, so the compiler will issue a warning for them.
+  unsigned variadicArgsBegin = msg.getSelector().getNumArgs();
+
+  // We're not interested in the last argument since it has to be nil or the
+  // compiler would have issued a warning for it elsewhere.
+  unsigned variadicArgsEnd = msg.getNumArgs() - 1;
+
+  if (variadicArgsEnd <= variadicArgsBegin)
+    return;
+
+  // Verify that all arguments have Objective-C types.
+  Optional<ExplodedNode*> errorNode;
+  ProgramStateRef state = C.getState();
+  
+  for (unsigned I = variadicArgsBegin; I != variadicArgsEnd; ++I) {
+    QualType ArgTy = msg.getArgExpr(I)->getType();
+    if (ArgTy->isObjCObjectPointerType())
+      continue;
+
+    // Block pointers are treated as Objective-C pointers.
+    if (ArgTy->isBlockPointerType())
+      continue;
+
+    // Ignore pointer constants.
+    if (msg.getArgSVal(I).getAs<loc::ConcreteInt>())
+      continue;
+    
+    // Ignore pointer types annotated with 'NSObject' attribute.
+    if (C.getASTContext().isObjCNSObjectType(ArgTy))
+      continue;
+    
+    // Ignore CF references, which can be toll-free bridged.
+    if (coreFoundation::isCFObjectRef(ArgTy))
+      continue;
+
+    // Generate only one error node to use for all bug reports.
+    if (!errorNode.hasValue())
+      errorNode = C.addTransition();
+
+    if (!errorNode.getValue())
+      continue;
+
+    SmallString<128> sbuf;
+    llvm::raw_svector_ostream os(sbuf);
+
+    StringRef TypeName = GetReceiverInterfaceName(msg);
+    if (!TypeName.empty())
+      os << "Argument to '" << TypeName << "' method '";
+    else
+      os << "Argument to method '";
+
+    os << msg.getSelector().getAsString() 
+       << "' should be an Objective-C pointer type, not '";
+    ArgTy.print(os, C.getLangOpts());
+    os << "'";
+
+    BugReport *R = new BugReport(*BT, os.str(), errorNode.getValue());
+    R->addRange(msg.getArgSourceRange(I));
+    C.emitReport(R);
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Improves the modeling of loops over Cocoa collections.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCLoopChecker
+  : public Checker<check::PostStmt<ObjCForCollectionStmt> > {
+  
+public:
+  void checkPostStmt(const ObjCForCollectionStmt *FCS, CheckerContext &C) const;
+};
+}
+
+static bool isKnownNonNilCollectionType(QualType T) {
+  const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>();
+  if (!PT)
+    return false;
+  
+  const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+  if (!ID)
+    return false;
+
+  switch (findKnownClass(ID)) {
+  case FC_NSArray:
+  case FC_NSDictionary:
+  case FC_NSEnumerator:
+  case FC_NSOrderedSet:
+  case FC_NSSet:
+    return true;
+  default:
+    return false;
+  }
+}
+
+/// Assumes that the collection is non-nil.
+///
+/// If the collection is known to be nil, returns NULL to indicate an infeasible
+/// path.
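+///
+/// (Illustrative: in 'for (id x in maybeNilArray)', paths on which the
+///  collection is definitely nil never take another loop iteration, so the
+///  corresponding state is pruned as infeasible.)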
+static ProgramStateRef checkCollectionNonNil(CheckerContext &C,
+                                             ProgramStateRef State,
+                                             const ObjCForCollectionStmt *FCS) {
+  if (!State)
+    return NULL;
+
+  SVal CollectionVal = C.getSVal(FCS->getCollection());
+  Optional<DefinedSVal> KnownCollection = CollectionVal.getAs<DefinedSVal>();
+  if (!KnownCollection)
+    return State;
+
+  ProgramStateRef StNonNil, StNil;
+  llvm::tie(StNonNil, StNil) = State->assume(*KnownCollection);
+  if (StNil && !StNonNil) {
+    // The collection is nil. This path is infeasible.
+    return NULL;
+  }
+
+  return StNonNil;
+}
+
+/// Assumes that the collection elements are non-nil.
+///
+/// This only applies if the collection is one of those known not to contain
+/// nil values.
+static ProgramStateRef checkElementNonNil(CheckerContext &C,
+                                          ProgramStateRef State,
+                                          const ObjCForCollectionStmt *FCS) {
+  if (!State)
+    return NULL;
+
+  // See if the collection is one where we /know/ the elements are non-nil.
+  if (!isKnownNonNilCollectionType(FCS->getCollection()->getType()))
+    return State;
+
+  const LocationContext *LCtx = C.getLocationContext();
+  const Stmt *Element = FCS->getElement();
+
+  // FIXME: Copied from ExprEngineObjC.
+  Optional<Loc> ElementLoc;
+  if (const DeclStmt *DS = dyn_cast<DeclStmt>(Element)) {
+    const VarDecl *ElemDecl = cast<VarDecl>(DS->getSingleDecl());
+    assert(ElemDecl->getInit() == 0);
+    ElementLoc = State->getLValue(ElemDecl, LCtx);
+  } else {
+    ElementLoc = State->getSVal(Element, LCtx).getAs<Loc>();
+  }
+
+  if (!ElementLoc)
+    return State;
+
+  // Go ahead and assume the value is non-nil.
+  SVal Val = State->getSVal(*ElementLoc);
+  return State->assume(Val.castAs<DefinedOrUnknownSVal>(), true);
+}
+
+void ObjCLoopChecker::checkPostStmt(const ObjCForCollectionStmt *FCS,
+                                    CheckerContext &C) const {
+  // Check if this is the branch for the end of the loop.
+  SVal CollectionSentinel = C.getSVal(FCS);
+  if (CollectionSentinel.isZeroConstant())
+    return;
+
+  ProgramStateRef State = C.getState();
+  State = checkCollectionNonNil(C, State, FCS);
+  State = checkElementNonNil(C, State, FCS);
+
+  if (!State)
+    C.generateSink();
+  else if (State != C.getState())
+    C.addTransition(State);
+}
+
+namespace {
+/// \class ObjCNonNilReturnValueChecker
+/// \brief The checker restricts the return values of APIs known to
+/// never (or almost never) return 'nil'.
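+/// (Illustrative: the value returned by '[array objectAtIndex:i]' is assumed
+///  to be non-nil, since NSArray cannot store nil elements.)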
+class ObjCNonNilReturnValueChecker
+  : public Checker<check::PostObjCMessage> {
+    mutable bool Initialized;
+    mutable Selector ObjectAtIndex;
+    mutable Selector ObjectAtIndexedSubscript;
+
+public:
+  ObjCNonNilReturnValueChecker() : Initialized(false) {}
+  void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+};
+}
+
+static ProgramStateRef assumeExprIsNonNull(const Expr *NonNullExpr,
+                                           ProgramStateRef State,
+                                           CheckerContext &C) {
+  SVal Val = State->getSVal(NonNullExpr, C.getLocationContext());
+  if (Optional<DefinedOrUnknownSVal> DV = Val.getAs<DefinedOrUnknownSVal>())
+    return State->assume(*DV, true);
+  return State;
+}
+
+void ObjCNonNilReturnValueChecker::checkPostObjCMessage(const ObjCMethodCall &M,
+                                                        CheckerContext &C)
+                                                        const {
+  ProgramStateRef State = C.getState();
+
+  if (!Initialized) {
+    ASTContext &Ctx = C.getASTContext();
+    ObjectAtIndex = GetUnarySelector("objectAtIndex", Ctx);
+    ObjectAtIndexedSubscript = GetUnarySelector("objectAtIndexedSubscript", Ctx);
+    Initialized = true;
+  }
+
+  // Check the receiver type.
+  if (const ObjCInterfaceDecl *Interface = M.getReceiverInterface()) {
+
+    // Assume that object returned from '[self init]' or '[super init]' is not
+    // 'nil' if we are processing an inlined function/method.
+    //
+    // A defensive callee will (and should) check if the object returned by
+    // '[super init]' is 'nil' before doing its own initialization. However,
+    // since 'nil' is rarely returned in practice, we should not warn when the
+    // caller to the defensive constructor uses the object in contexts where
+    // 'nil' is not accepted.
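+    // (Illustrative: for an inlined '-init' written as
+    //  'self = [super init]; if (self) { ... } return self;', the returned
+    //  object is assumed non-nil, so callers are not flagged for using it
+    //  where 'nil' is not accepted.)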
+    if (!C.inTopFrame() && M.getDecl() &&
+        M.getDecl()->getMethodFamily() == OMF_init &&
+        M.isReceiverSelfOrSuper()) {
+      State = assumeExprIsNonNull(M.getOriginExpr(), State, C);
+    }
+
+    // Objects returned from
+    // [NSArray|NSOrderedSet]::[ObjectAtIndex|ObjectAtIndexedSubscript]
+    // are never 'nil'.
+    FoundationClass Cl = findKnownClass(Interface);
+    if (Cl == FC_NSArray || Cl == FC_NSOrderedSet) {
+      Selector Sel = M.getSelector();
+      if (Sel == ObjectAtIndex || Sel == ObjectAtIndexedSubscript) {
+        // Go ahead and assume the value is non-nil.
+        State = assumeExprIsNonNull(M.getOriginExpr(), State, C);
+      }
+    }
+  }
+  C.addTransition(State);
+}
+
+//===----------------------------------------------------------------------===//
+// Check registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerNilArgChecker(CheckerManager &mgr) {
+  mgr.registerChecker<NilArgChecker>();
+}
+
+void ento::registerCFNumberCreateChecker(CheckerManager &mgr) {
+  mgr.registerChecker<CFNumberCreateChecker>();
+}
+
+void ento::registerCFRetainReleaseChecker(CheckerManager &mgr) {
+  mgr.registerChecker<CFRetainReleaseChecker>();
+}
+
+void ento::registerClassReleaseChecker(CheckerManager &mgr) {
+  mgr.registerChecker<ClassReleaseChecker>();
+}
+
+void ento::registerVariadicMethodTypeChecker(CheckerManager &mgr) {
+  mgr.registerChecker<VariadicMethodTypeChecker>();
+}
+
+void ento::registerObjCLoopChecker(CheckerManager &mgr) {
+  mgr.registerChecker<ObjCLoopChecker>();
+}
+
+void ento::registerObjCNonNilReturnValueChecker(CheckerManager &mgr) {
+  mgr.registerChecker<ObjCNonNilReturnValueChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
new file mode 100644
index 0000000..5169244
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -0,0 +1,157 @@
+//== BoolAssignmentChecker.cpp - Boolean assignment checker -----*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines BoolAssignmentChecker, a builtin check in ExprEngine that
+// performs checks for assignment of non-Boolean values to Boolean variables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+  class BoolAssignmentChecker : public Checker< check::Bind > {
+    mutable OwningPtr<BuiltinBug> BT;
+    void emitReport(ProgramStateRef state, CheckerContext &C) const;
+  public:
+    void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
+  };
+} // end anonymous namespace
+
+void BoolAssignmentChecker::emitReport(ProgramStateRef state,
+                                       CheckerContext &C) const {
+  if (ExplodedNode *N = C.addTransition(state)) {
+    if (!BT)
+      BT.reset(new BuiltinBug("Assignment of a non-Boolean value"));    
+    C.emitReport(new BugReport(*BT, BT->getDescription(), N));
+  }
+}
+
+static bool isBooleanType(QualType Ty) {
+  if (Ty->isBooleanType()) // C++ or C99
+    return true;
+  
+  if (const TypedefType *TT = Ty->getAs<TypedefType>())
+    return TT->getDecl()->getName() == "BOOL"   || // Objective-C
+           TT->getDecl()->getName() == "_Bool"  || // stdbool.h < C99
+           TT->getDecl()->getName() == "Boolean";  // MacTypes.h
+  
+  return false;
+}
+
+void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+                                      CheckerContext &C) const {
+  
+  // We are only interested in stores into Booleans.
+  const TypedValueRegion *TR =
+    dyn_cast_or_null<TypedValueRegion>(loc.getAsRegion());
+  
+  if (!TR)
+    return;
+  
+  QualType valTy = TR->getValueType();
+  
+  if (!isBooleanType(valTy))
+    return;
+  
+  // Get the value of the right-hand side.  We only care about values
+  // that are defined (UnknownVals and UndefinedVals are handled by other
+  // checkers).
+  Optional<DefinedSVal> DV = val.getAs<DefinedSVal>();
+  if (!DV)
+    return;
+    
+  // Check if the assigned value meets our criteria for correctness.  It must
+  // be a value that is either 0 or 1.  One way to check this is to see if
+  // the value is possibly < 0 (for a negative value) or greater than 1.
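+  //
+  // Illustrative example (not from the original source): for 'BOOL b = -1;'
+  // the bound value is fully constrained to be < 0, so only stateLT below is
+  // feasible and a report is emitted; for 'BOOL b = x;' with an unconstrained
+  // 'x', both branches remain feasible and no warning is produced.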
+  ProgramStateRef state = C.getState(); 
+  SValBuilder &svalBuilder = C.getSValBuilder();
+  ConstraintManager &CM = C.getConstraintManager();
+  
+  // First, ensure that the value is >= 0.  
+  DefinedSVal zeroVal = svalBuilder.makeIntVal(0, valTy);
+  SVal greaterThanOrEqualToZeroVal =
+    svalBuilder.evalBinOp(state, BO_GE, *DV, zeroVal,
+                          svalBuilder.getConditionType());
+
+  Optional<DefinedSVal> greaterThanEqualToZero =
+      greaterThanOrEqualToZeroVal.getAs<DefinedSVal>();
+
+  if (!greaterThanEqualToZero) {
+    // The SValBuilder cannot construct a valid SVal for this condition.
+    // This means we cannot properly reason about it.    
+    return;
+  }
+  
+  ProgramStateRef stateLT, stateGE;
+  llvm::tie(stateGE, stateLT) = CM.assumeDual(state, *greaterThanEqualToZero);
+  
+  // Is it possible for the value to be less than zero?
+  if (stateLT) {
+    // It is possible for the value to be less than zero.  We only
+    // want to emit a warning, however, if that value is fully constrained.
+    // If it is possible for the value to be >= 0, then essentially the
+    // value is underconstrained and there is nothing left to be done.
+    if (!stateGE)
+      emitReport(stateLT, C);
+    
+    // In either case, we are done.
+    return;
+  }
+  
+  // If we reach here, it must be the case that the value is constrained
+  // to only be >= 0.
+  assert(stateGE == state);
+  
+  // At this point we know that the value is >= 0.
+  // Now check to ensure that the value is <= 1.
+  DefinedSVal OneVal = svalBuilder.makeIntVal(1, valTy);
+  SVal lessThanEqToOneVal =
+    svalBuilder.evalBinOp(state, BO_LE, *DV, OneVal,
+                          svalBuilder.getConditionType());
+
+  Optional<DefinedSVal> lessThanEqToOne =
+      lessThanEqToOneVal.getAs<DefinedSVal>();
+
+  if (!lessThanEqToOne) {
+    // The SValBuilder cannot construct a valid SVal for this condition.
+    // This means we cannot properly reason about it.    
+    return;
+  }
+  
+  ProgramStateRef stateGT, stateLE;
+  llvm::tie(stateLE, stateGT) = CM.assumeDual(state, *lessThanEqToOne);
+  
+  // Is it possible for the value to be greater than one?
+  if (stateGT) {
+    // It is possible for the value to be greater than one.  We only
+    // want to emit a warning, however, if that value is fully constrained.
+    // If it is possible for the value to be <= 1, then essentially the
+    // value is underconstrained and there is nothing left to be done.
+    if (!stateLE)
+      emitReport(stateGT, C);
+    
+    // In either case, we are done.
+    return;
+  }
+  
+  // If we reach here, it must be the case that the value is constrained
+  // to only be <= 1.
+  assert(stateLE == state);
+}
+
+void ento::registerBoolAssignmentChecker(CheckerManager &mgr) {
+    mgr.registerChecker<BoolAssignmentChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
new file mode 100644
index 0000000..a3327d8
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -0,0 +1,83 @@
+//=== BuiltinFunctionChecker.cpp --------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker evaluates clang builtin functions.
+//
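+// For example (a hedged illustration, not code from this file), the builtins
+// modeled here appear in user code as:
+//
+//   if (__builtin_expect(n == 0, 0))            // yields the value of 'n == 0'
+//     ...;
+//   char *buf = (char *)__builtin_alloca(len);  // region of exactly 'len' bytes
+//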
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class BuiltinFunctionChecker : public Checker<eval::Call> {
+public:
+  bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+};
+
+}
+
+bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
+                                      CheckerContext &C) const {
+  ProgramStateRef state = C.getState();
+  const FunctionDecl *FD = C.getCalleeDecl(CE);
+  const LocationContext *LCtx = C.getLocationContext();
+  if (!FD)
+    return false;
+
+  unsigned id = FD->getBuiltinID();
+
+  if (!id)
+    return false;
+
+  switch (id) {
+  case Builtin::BI__builtin_expect: {
+    // For __builtin_expect, just return the value of the subexpression.
+    assert (CE->arg_begin() != CE->arg_end());
+    SVal X = state->getSVal(*(CE->arg_begin()), LCtx);
+    C.addTransition(state->BindExpr(CE, LCtx, X));
+    return true;
+  }
+
+  case Builtin::BI__builtin_alloca: {
+    // FIXME: Refactor into StoreManager itself?
+    MemRegionManager& RM = C.getStoreManager().getRegionManager();
+    const AllocaRegion* R =
+      RM.getAllocaRegion(CE, C.blockCount(), C.getLocationContext());
+
+    // Set the extent of the region in bytes. This enables us to use the
+    // SVal of the argument directly. If we save the extent in bits, we
+    // cannot represent values like symbol*8.
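+    //
+    // Illustrative restatement (assumption about intent): for 'alloca(n)'
+    // with a symbolic 'n', the constraint below is simply 'extent == n';
+    // tracking the extent in bits would instead require 'n * 8'.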
+    DefinedOrUnknownSVal Size =
+        state->getSVal(*(CE->arg_begin()), LCtx).castAs<DefinedOrUnknownSVal>();
+
+    SValBuilder& svalBuilder = C.getSValBuilder();
+    DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
+    DefinedOrUnknownSVal extentMatchesSizeArg =
+      svalBuilder.evalEQ(state, Extent, Size);
+    state = state->assume(extentMatchesSizeArg, true);
+    assert(state && "The region should not have any previous constraints");
+
+    C.addTransition(state->BindExpr(CE, LCtx, loc::MemRegionVal(R)));
+    return true;
+  }
+  }
+
+  return false;
+}
+
+void ento::registerBuiltinFunctionChecker(CheckerManager &mgr) {
+  mgr.registerChecker<BuiltinFunctionChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CMakeLists.txt b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CMakeLists.txt
new file mode 100644
index 0000000..7da6825
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CMakeLists.txt
@@ -0,0 +1,91 @@
+clang_tablegen(Checkers.inc -gen-clang-sa-checkers
+  -I ${CMAKE_CURRENT_SOURCE_DIR}/../../../include
+  SOURCE Checkers.td
+  TARGET ClangSACheckers)
+
+add_clang_library(clangStaticAnalyzerCheckers
+  AllocationDiagnostics.cpp
+  AnalyzerStatsChecker.cpp
+  ArrayBoundChecker.cpp
+  ArrayBoundCheckerV2.cpp
+  BasicObjCFoundationChecks.cpp
+  BoolAssignmentChecker.cpp
+  BuiltinFunctionChecker.cpp
+  CStringChecker.cpp
+  CStringSyntaxChecker.cpp
+  CallAndMessageChecker.cpp
+  CastSizeChecker.cpp
+  CastToStructChecker.cpp
+  CheckObjCDealloc.cpp
+  CheckObjCInstMethSignature.cpp
+  CheckSecuritySyntaxOnly.cpp
+  CheckSizeofPointer.cpp
+  CheckerDocumentation.cpp
+  ChrootChecker.cpp
+  ClangCheckers.cpp
+  CommonBugCategories.cpp
+  DeadStoresChecker.cpp
+  DebugCheckers.cpp
+  DereferenceChecker.cpp
+  DirectIvarAssignment.cpp
+  DivZeroChecker.cpp
+  DynamicTypePropagation.cpp
+  ExprInspectionChecker.cpp
+  FixedAddressChecker.cpp
+  GenericTaintChecker.cpp
+  IdempotentOperationChecker.cpp
+  IvarInvalidationChecker.cpp
+  LLVMConventionsChecker.cpp
+  MacOSKeychainAPIChecker.cpp
+  MacOSXAPIChecker.cpp
+  MallocChecker.cpp
+  MallocOverflowSecurityChecker.cpp
+  MallocSizeofChecker.cpp
+  NSAutoreleasePoolChecker.cpp
+  NSErrorChecker.cpp
+  NoReturnFunctionChecker.cpp
+  NonNullParamChecker.cpp
+  ObjCAtSyncChecker.cpp
+  ObjCContainersASTChecker.cpp
+  ObjCContainersChecker.cpp
+  ObjCMissingSuperCallChecker.cpp
+  ObjCSelfInitChecker.cpp
+  ObjCUnusedIVarsChecker.cpp
+  PointerArithChecker.cpp
+  PointerSubChecker.cpp
+  PthreadLockChecker.cpp
+  RetainCountChecker.cpp
+  ReturnPointerRangeChecker.cpp
+  ReturnUndefChecker.cpp
+  SimpleStreamChecker.cpp
+  StackAddrEscapeChecker.cpp
+  StreamChecker.cpp
+  TaintTesterChecker.cpp
+  TraversalChecker.cpp
+  UndefBranchChecker.cpp
+  UndefCapturedBlockVarChecker.cpp
+  UndefResultChecker.cpp
+  UndefinedArraySubscriptChecker.cpp
+  UndefinedAssignmentChecker.cpp
+  UnixAPIChecker.cpp
+  UnreachableCodeChecker.cpp
+  VLASizeChecker.cpp
+  VirtualCallChecker.cpp
+  )
+
+add_dependencies(clangStaticAnalyzerCheckers
+  clangStaticAnalyzerCore
+  ClangAttrClasses
+  ClangAttrList
+  ClangCommentNodes
+  ClangDeclNodes
+  ClangDiagnosticCommon
+  ClangStmtNodes
+  ClangSACheckers
+  )
+
+target_link_libraries(clangStaticAnalyzerCheckers
+  clangBasic
+  clangAST
+  clangStaticAnalyzerCore
+  )
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
new file mode 100644
index 0000000..aa1ca6f
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -0,0 +1,2034 @@
+//= CStringChecker.cpp - Checks calls to C string functions --------*- C++ -*-//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines CStringChecker, which is an assortment of checks on calls
+// to functions in <string.h>.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "InterCheckerAPI.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CStringChecker : public Checker< eval::Call,
+                                         check::PreStmt<DeclStmt>,
+                                         check::LiveSymbols,
+                                         check::DeadSymbols,
+                                         check::RegionChanges
+                                         > {
+  mutable OwningPtr<BugType> BT_Null,
+                             BT_Bounds,
+                             BT_Overlap,
+                             BT_NotCString,
+                             BT_AdditionOverflow;
+
+  mutable const char *CurrentFunctionDescription;
+
+public:
+  /// The filter is used to filter out the diagnostics which are not enabled by
+  /// the user.
+  struct CStringChecksFilter {
+    DefaultBool CheckCStringNullArg;
+    DefaultBool CheckCStringOutOfBounds;
+    DefaultBool CheckCStringBufferOverlap;
+    DefaultBool CheckCStringNotNullTerm;
+  };
+
+  CStringChecksFilter Filter;
+
+  static void *getTag() { static int tag; return &tag; }
+
+  bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+  void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
+  void checkLiveSymbols(ProgramStateRef state, SymbolReaper &SR) const;
+  void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+  bool wantsRegionChangeUpdate(ProgramStateRef state) const;
+
+  ProgramStateRef 
+    checkRegionChanges(ProgramStateRef state,
+                       const InvalidatedSymbols *,
+                       ArrayRef<const MemRegion *> ExplicitRegions,
+                       ArrayRef<const MemRegion *> Regions,
+                       const CallEvent *Call) const;
+
+  typedef void (CStringChecker::*FnCheck)(CheckerContext &,
+                                          const CallExpr *) const;
+
+  void evalMemcpy(CheckerContext &C, const CallExpr *CE) const;
+  void evalMempcpy(CheckerContext &C, const CallExpr *CE) const;
+  void evalMemmove(CheckerContext &C, const CallExpr *CE) const;
+  void evalBcopy(CheckerContext &C, const CallExpr *CE) const;
+  void evalCopyCommon(CheckerContext &C, const CallExpr *CE,
+                      ProgramStateRef state,
+                      const Expr *Size,
+                      const Expr *Dest,
+                      const Expr *Source,
+                      bool Restricted = false,
+                      bool IsMempcpy = false) const;
+
+  void evalMemcmp(CheckerContext &C, const CallExpr *CE) const;
+
+  void evalstrLength(CheckerContext &C, const CallExpr *CE) const;
+  void evalstrnLength(CheckerContext &C, const CallExpr *CE) const;
+  void evalstrLengthCommon(CheckerContext &C,
+                           const CallExpr *CE, 
+                           bool IsStrnlen = false) const;
+
+  void evalStrcpy(CheckerContext &C, const CallExpr *CE) const;
+  void evalStrncpy(CheckerContext &C, const CallExpr *CE) const;
+  void evalStpcpy(CheckerContext &C, const CallExpr *CE) const;
+  void evalStrcpyCommon(CheckerContext &C,
+                        const CallExpr *CE,
+                        bool returnEnd,
+                        bool isBounded,
+                        bool isAppending) const;
+
+  void evalStrcat(CheckerContext &C, const CallExpr *CE) const;
+  void evalStrncat(CheckerContext &C, const CallExpr *CE) const;
+
+  void evalStrcmp(CheckerContext &C, const CallExpr *CE) const;
+  void evalStrncmp(CheckerContext &C, const CallExpr *CE) const;
+  void evalStrcasecmp(CheckerContext &C, const CallExpr *CE) const;
+  void evalStrncasecmp(CheckerContext &C, const CallExpr *CE) const;
+  void evalStrcmpCommon(CheckerContext &C,
+                        const CallExpr *CE,
+                        bool isBounded = false,
+                        bool ignoreCase = false) const;
+
+  void evalStrsep(CheckerContext &C, const CallExpr *CE) const;
+
+  // Utility methods
+  std::pair<ProgramStateRef , ProgramStateRef >
+  static assumeZero(CheckerContext &C,
+                    ProgramStateRef state, SVal V, QualType Ty);
+
+  static ProgramStateRef setCStringLength(ProgramStateRef state,
+                                              const MemRegion *MR,
+                                              SVal strLength);
+  static SVal getCStringLengthForRegion(CheckerContext &C,
+                                        ProgramStateRef &state,
+                                        const Expr *Ex,
+                                        const MemRegion *MR,
+                                        bool hypothetical);
+  SVal getCStringLength(CheckerContext &C,
+                        ProgramStateRef &state,
+                        const Expr *Ex,
+                        SVal Buf,
+                        bool hypothetical = false) const;
+
+  const StringLiteral *getCStringLiteral(CheckerContext &C, 
+                                         ProgramStateRef &state,
+                                         const Expr *expr,  
+                                         SVal val) const;
+
+  static ProgramStateRef InvalidateBuffer(CheckerContext &C,
+                                              ProgramStateRef state,
+                                              const Expr *Ex, SVal V);
+
+  static bool SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
+                              const MemRegion *MR);
+
+  // Re-usable checks
+  ProgramStateRef checkNonNull(CheckerContext &C,
+                                   ProgramStateRef state,
+                                   const Expr *S,
+                                   SVal l) const;
+  ProgramStateRef CheckLocation(CheckerContext &C,
+                                    ProgramStateRef state,
+                                    const Expr *S,
+                                    SVal l,
+                                    const char *message = NULL) const;
+  ProgramStateRef CheckBufferAccess(CheckerContext &C,
+                                        ProgramStateRef state,
+                                        const Expr *Size,
+                                        const Expr *FirstBuf,
+                                        const Expr *SecondBuf,
+                                        const char *firstMessage = NULL,
+                                        const char *secondMessage = NULL,
+                                        bool WarnAboutSize = false) const;
+
+  ProgramStateRef CheckBufferAccess(CheckerContext &C,
+                                        ProgramStateRef state,
+                                        const Expr *Size,
+                                        const Expr *Buf,
+                                        const char *message = NULL,
+                                        bool WarnAboutSize = false) const {
+    // This is a convenience overload.
+    return CheckBufferAccess(C, state, Size, Buf, NULL, message, NULL,
+                             WarnAboutSize);
+  }
+  ProgramStateRef CheckOverlap(CheckerContext &C,
+                                   ProgramStateRef state,
+                                   const Expr *Size,
+                                   const Expr *First,
+                                   const Expr *Second) const;
+  void emitOverlapBug(CheckerContext &C,
+                      ProgramStateRef state,
+                      const Stmt *First,
+                      const Stmt *Second) const;
+
+  ProgramStateRef checkAdditionOverflow(CheckerContext &C,
+                                            ProgramStateRef state,
+                                            NonLoc left,
+                                            NonLoc right) const;
+};
+
+} //end anonymous namespace
+
+REGISTER_MAP_WITH_PROGRAMSTATE(CStringLength, const MemRegion *, SVal)
+
+//===----------------------------------------------------------------------===//
+// Individual checks and utility methods.
+//===----------------------------------------------------------------------===//
+
+std::pair<ProgramStateRef , ProgramStateRef >
+CStringChecker::assumeZero(CheckerContext &C, ProgramStateRef state, SVal V,
+                           QualType Ty) {
+  Optional<DefinedSVal> val = V.getAs<DefinedSVal>();
+  if (!val)
+    return std::pair<ProgramStateRef , ProgramStateRef >(state, state);
+
+  SValBuilder &svalBuilder = C.getSValBuilder();
+  DefinedOrUnknownSVal zero = svalBuilder.makeZeroVal(Ty);
+  return state->assume(svalBuilder.evalEQ(state, *val, zero));
+}
+
+ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
+                                            ProgramStateRef state,
+                                            const Expr *S, SVal l) const {
+  // If a previous check has failed, propagate the failure.
+  if (!state)
+    return NULL;
+
+  ProgramStateRef stateNull, stateNonNull;
+  llvm::tie(stateNull, stateNonNull) = assumeZero(C, state, l, S->getType());
+
+  if (stateNull && !stateNonNull) {
+    if (!Filter.CheckCStringNullArg)
+      return NULL;
+
+    ExplodedNode *N = C.generateSink(stateNull);
+    if (!N)
+      return NULL;
+
+    if (!BT_Null)
+      BT_Null.reset(new BuiltinBug("Unix API",
+        "Null pointer argument in call to byte string function"));
+
+    SmallString<80> buf;
+    llvm::raw_svector_ostream os(buf);
+    assert(CurrentFunctionDescription);
+    os << "Null pointer argument in call to " << CurrentFunctionDescription;
+
+    // Generate a report for this bug.
+    BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Null.get());
+    BugReport *report = new BugReport(*BT, os.str(), N);
+
+    report->addRange(S->getSourceRange());
+    bugreporter::trackNullOrUndefValue(N, S, *report);
+    C.emitReport(report);
+    return NULL;
+  }
+
+  // From here on, assume that the value is non-null.
+  assert(stateNonNull);
+  return stateNonNull;
+}
+
+// FIXME: This was originally copied from ArrayBoundChecker.cpp. Refactor?
+ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
+                                             ProgramStateRef state,
+                                             const Expr *S, SVal l,
+                                             const char *warningMsg) const {
+  // If a previous check has failed, propagate the failure.
+  if (!state)
+    return NULL;
+
+  // Check for out of bound array element access.
+  const MemRegion *R = l.getAsRegion();
+  if (!R)
+    return state;
+
+  const ElementRegion *ER = dyn_cast<ElementRegion>(R);
+  if (!ER)
+    return state;
+
+  assert(ER->getValueType() == C.getASTContext().CharTy &&
+    "CheckLocation should only be called with char* ElementRegions");
+
+  // Get the size of the array.
+  const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
+  SValBuilder &svalBuilder = C.getSValBuilder();
+  SVal Extent = 
+    svalBuilder.convertToArrayIndex(superReg->getExtent(svalBuilder));
+  DefinedOrUnknownSVal Size = Extent.castAs<DefinedOrUnknownSVal>();
+
+  // Get the index of the accessed element.
+  DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
+
+  ProgramStateRef StInBound = state->assumeInBound(Idx, Size, true);
+  ProgramStateRef StOutBound = state->assumeInBound(Idx, Size, false);
+  if (StOutBound && !StInBound) {
+    ExplodedNode *N = C.generateSink(StOutBound);
+    if (!N)
+      return NULL;
+
+    if (!BT_Bounds) {
+      BT_Bounds.reset(new BuiltinBug("Out-of-bound array access",
+        "Byte string function accesses out-of-bound array element"));
+    }
+    BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Bounds.get());
+
+    // Generate a report for this bug.
+    BugReport *report;
+    if (warningMsg) {
+      report = new BugReport(*BT, warningMsg, N);
+    } else {
+      assert(CurrentFunctionDescription);
+      assert(CurrentFunctionDescription[0] != '\0');
+
+      SmallString<80> buf;
+      llvm::raw_svector_ostream os(buf);
+      os << toUppercase(CurrentFunctionDescription[0])
+         << &CurrentFunctionDescription[1]
+         << " accesses out-of-bound array element";
+      report = new BugReport(*BT, os.str(), N);      
+    }
+
+    // FIXME: It would be nice to eventually make this diagnostic more clear,
+    // e.g., by referencing the original declaration or by saying *why* this
+    // reference is outside the range.
+
+    report->addRange(S->getSourceRange());
+    C.emitReport(report);
+    return NULL;
+  }
+  
+  // Array bound check succeeded.  From this point forward the array bound
+  // check should always succeed.
+  return StInBound;
+}
+
+ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
+                                                 ProgramStateRef state,
+                                                 const Expr *Size,
+                                                 const Expr *FirstBuf,
+                                                 const Expr *SecondBuf,
+                                                 const char *firstMessage,
+                                                 const char *secondMessage,
+                                                 bool WarnAboutSize) const {
+  // If a previous check has failed, propagate the failure.
+  if (!state)
+    return NULL;
+
+  SValBuilder &svalBuilder = C.getSValBuilder();
+  ASTContext &Ctx = svalBuilder.getContext();
+  const LocationContext *LCtx = C.getLocationContext();
+
+  QualType sizeTy = Size->getType();
+  QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
+
+  // Check that the first buffer is non-null.
+  SVal BufVal = state->getSVal(FirstBuf, LCtx);
+  state = checkNonNull(C, state, FirstBuf, BufVal);
+  if (!state)
+    return NULL;
+
+  // If out-of-bounds checking is turned off, skip the rest.
+  if (!Filter.CheckCStringOutOfBounds)
+    return state;
+
+  // Get the access length and make sure it is known.
+  // FIXME: This assumes the caller has already checked that the access length
+  // is positive. And that it's unsigned.
+  SVal LengthVal = state->getSVal(Size, LCtx);
+  Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
+  if (!Length)
+    return state;
+
+  // Compute the offset of the last element to be accessed: size-1.
+  NonLoc One = svalBuilder.makeIntVal(1, sizeTy).castAs<NonLoc>();
+  NonLoc LastOffset = svalBuilder
+      .evalBinOpNN(state, BO_Sub, *Length, One, sizeTy).castAs<NonLoc>();
+
+  // Check that the first buffer is sufficiently long.
+  SVal BufStart = svalBuilder.evalCast(BufVal, PtrTy, FirstBuf->getType());
+  if (Optional<Loc> BufLoc = BufStart.getAs<Loc>()) {
+    const Expr *warningExpr = (WarnAboutSize ? Size : FirstBuf);
+
+    SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
+                                          LastOffset, PtrTy);
+    state = CheckLocation(C, state, warningExpr, BufEnd, firstMessage);
+
+    // If the buffer isn't large enough, abort.
+    if (!state)
+      return NULL;
+  }
+
+  // If there's a second buffer, check it as well.
+  if (SecondBuf) {
+    BufVal = state->getSVal(SecondBuf, LCtx);
+    state = checkNonNull(C, state, SecondBuf, BufVal);
+    if (!state)
+      return NULL;
+
+    BufStart = svalBuilder.evalCast(BufVal, PtrTy, SecondBuf->getType());
+    if (Optional<Loc> BufLoc = BufStart.getAs<Loc>()) {
+      const Expr *warningExpr = (WarnAboutSize ? Size : SecondBuf);
+
+      SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
+                                            LastOffset, PtrTy);
+      state = CheckLocation(C, state, warningExpr, BufEnd, secondMessage);
+    }
+  }
+
+  // Large enough or not, return this state!
+  return state;
+}
+
+ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
+                                            ProgramStateRef state,
+                                            const Expr *Size,
+                                            const Expr *First,
+                                            const Expr *Second) const {
+  if (!Filter.CheckCStringBufferOverlap)
+    return state;
+
+  // Do a simple check for overlap: if the two arguments are from the same
+  // buffer, see if the end of the first is greater than the start of the second
+  // or vice versa.
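+  //
+  // Illustrative example (not from the original source): for
+  // 'memcpy(buf, buf + 2, 8)' the first range ends at 'buf + 8', which lies
+  // past the second range's start 'buf + 2', so the buffers overlap and
+  // emitOverlapBug() fires.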
+
+  // If a previous check has failed, propagate the failure.
+  if (!state)
+    return NULL;
+
+  ProgramStateRef stateTrue, stateFalse;
+
+  // Get the buffer values and make sure they're known locations.
+  const LocationContext *LCtx = C.getLocationContext();
+  SVal firstVal = state->getSVal(First, LCtx);
+  SVal secondVal = state->getSVal(Second, LCtx);
+
+  Optional<Loc> firstLoc = firstVal.getAs<Loc>();
+  if (!firstLoc)
+    return state;
+
+  Optional<Loc> secondLoc = secondVal.getAs<Loc>();
+  if (!secondLoc)
+    return state;
+
+  // Are the two values the same?
+  SValBuilder &svalBuilder = C.getSValBuilder();  
+  llvm::tie(stateTrue, stateFalse) =
+    state->assume(svalBuilder.evalEQ(state, *firstLoc, *secondLoc));
+
+  if (stateTrue && !stateFalse) {
+    // If the values are known to be equal, that's automatically an overlap.
+    emitOverlapBug(C, stateTrue, First, Second);
+    return NULL;
+  }
+
+  // assume the two expressions are not equal.
+  assert(stateFalse);
+  state = stateFalse;
+
+  // Which value comes first?
+  QualType cmpTy = svalBuilder.getConditionType();
+  SVal reverse = svalBuilder.evalBinOpLL(state, BO_GT,
+                                         *firstLoc, *secondLoc, cmpTy);
+  Optional<DefinedOrUnknownSVal> reverseTest =
+      reverse.getAs<DefinedOrUnknownSVal>();
+  if (!reverseTest)
+    return state;
+
+  llvm::tie(stateTrue, stateFalse) = state->assume(*reverseTest);
+  if (stateTrue) {
+    if (stateFalse) {
+      // If we don't know which one comes first, we can't perform this test.
+      return state;
+    } else {
+      // Switch the values so that firstVal is before secondVal.
+      std::swap(firstLoc, secondLoc);
+
+      // Switch the Exprs as well, so that they still correspond.
+      std::swap(First, Second);
+    }
+  }
+
+  // Get the length, and make sure it too is known.
+  SVal LengthVal = state->getSVal(Size, LCtx);
+  Optional<NonLoc> Length = LengthVal.getAs<NonLoc>();
+  if (!Length)
+    return state;
+
+  // Convert the first buffer's start address to char*.
+  // Bail out if the cast fails.
+  ASTContext &Ctx = svalBuilder.getContext();
+  QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
+  SVal FirstStart = svalBuilder.evalCast(*firstLoc, CharPtrTy, 
+                                         First->getType());
+  Optional<Loc> FirstStartLoc = FirstStart.getAs<Loc>();
+  if (!FirstStartLoc)
+    return state;
+
+  // Compute the end of the first buffer. Bail out if THAT fails.
+  SVal FirstEnd = svalBuilder.evalBinOpLN(state, BO_Add,
+                                 *FirstStartLoc, *Length, CharPtrTy);
+  Optional<Loc> FirstEndLoc = FirstEnd.getAs<Loc>();
+  if (!FirstEndLoc)
+    return state;
+
+  // Is the end of the first buffer past the start of the second buffer?
+  SVal Overlap = svalBuilder.evalBinOpLL(state, BO_GT,
+                                *FirstEndLoc, *secondLoc, cmpTy);
+  Optional<DefinedOrUnknownSVal> OverlapTest =
+      Overlap.getAs<DefinedOrUnknownSVal>();
+  if (!OverlapTest)
+    return state;
+
+  llvm::tie(stateTrue, stateFalse) = state->assume(*OverlapTest);
+
+  if (stateTrue && !stateFalse) {
+    // Overlap!
+    emitOverlapBug(C, stateTrue, First, Second);
+    return NULL;
+  }
+
+  // assume the two expressions don't overlap.
+  assert(stateFalse);
+  return stateFalse;
+}
+
+void CStringChecker::emitOverlapBug(CheckerContext &C, ProgramStateRef state,
+                                  const Stmt *First, const Stmt *Second) const {
+  ExplodedNode *N = C.generateSink(state);
+  if (!N)
+    return;
+
+  if (!BT_Overlap)
+    BT_Overlap.reset(new BugType("Unix API", "Improper arguments"));
+
+  // Generate a report for this bug.
+  BugReport *report = 
+    new BugReport(*BT_Overlap,
+      "Arguments must not be overlapping buffers", N);
+  report->addRange(First->getSourceRange());
+  report->addRange(Second->getSourceRange());
+
+  C.emitReport(report);
+}
+
+ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
+                                                     ProgramStateRef state,
+                                                     NonLoc left,
+                                                     NonLoc right) const {
+  // If out-of-bounds checking is turned off, skip the rest.
+  if (!Filter.CheckCStringOutOfBounds)
+    return state;
+
+  // If a previous check has failed, propagate the failure.
+  if (!state)
+    return NULL;
+
+  SValBuilder &svalBuilder = C.getSValBuilder();
+  BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
+
+  QualType sizeTy = svalBuilder.getContext().getSizeType();
+  const llvm::APSInt &maxValInt = BVF.getMaxValue(sizeTy);
+  NonLoc maxVal = svalBuilder.makeIntVal(maxValInt);
+
+  SVal maxMinusRight;
+  if (right.getAs<nonloc::ConcreteInt>()) {
+    maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, right,
+                                                 sizeTy);
+  } else {
+    // Try switching the operands. (The order of these two assignments is
+    // important!)
+    maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, left, 
+                                            sizeTy);
+    left = right;
+  }
+
+  if (Optional<NonLoc> maxMinusRightNL = maxMinusRight.getAs<NonLoc>()) {
+    QualType cmpTy = svalBuilder.getConditionType();
+    // If left > max - right, we have an overflow.
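+    // Worked example (illustrative numbers only): with a 32-bit size_t,
+    // max = 4294967295; for left = 10 and right = 4294967290 we have
+    // max - right = 5 and 10 > 5, so the sum would wrap around.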
+    SVal willOverflow = svalBuilder.evalBinOpNN(state, BO_GT, left,
+                                                *maxMinusRightNL, cmpTy);
+
+    ProgramStateRef stateOverflow, stateOkay;
+    llvm::tie(stateOverflow, stateOkay) =
+      state->assume(willOverflow.castAs<DefinedOrUnknownSVal>());
+
+    if (stateOverflow && !stateOkay) {
+      // We have an overflow. Emit a bug report.
+      ExplodedNode *N = C.generateSink(stateOverflow);
+      if (!N)
+        return NULL;
+
+      if (!BT_AdditionOverflow)
+        BT_AdditionOverflow.reset(new BuiltinBug("API",
+          "Sum of expressions causes overflow"));
+
+      // This isn't a great error message, but this should never occur in real
+      // code anyway -- you'd have to create a buffer longer than a size_t can
+      // represent, which is sort of a contradiction.
+      const char *warning =
+        "This expression will create a string whose length is too big to "
+        "be represented as a size_t";
+
+      // Generate a report for this bug.
+      BugReport *report = new BugReport(*BT_AdditionOverflow, warning, N);
+      C.emitReport(report);        
+
+      return NULL;
+    }
+
+    // From now on, assume an overflow didn't occur.
+    assert(stateOkay);
+    state = stateOkay;
+  }
+
+  return state;
+}
+
+ProgramStateRef CStringChecker::setCStringLength(ProgramStateRef state,
+                                                const MemRegion *MR,
+                                                SVal strLength) {
+  assert(!strLength.isUndef() && "Attempt to set an undefined string length");
+
+  MR = MR->StripCasts();
+
+  switch (MR->getKind()) {
+  case MemRegion::StringRegionKind:
+    // FIXME: This can happen if we strcpy() into a string region. This is
+    // undefined [C99 6.4.5p6], but we should still warn about it.
+    return state;
+
+  case MemRegion::SymbolicRegionKind:
+  case MemRegion::AllocaRegionKind:
+  case MemRegion::VarRegionKind:
+  case MemRegion::FieldRegionKind:
+  case MemRegion::ObjCIvarRegionKind:
+    // These are the types we can currently track string lengths for.
+    break;
+
+  case MemRegion::ElementRegionKind:
+    // FIXME: Handle element regions by upper-bounding the parent region's
+    // string length.
+    return state;
+
+  default:
+    // Other regions (mostly non-data) can't have a reliable C string length.
+    // For now, just ignore the change.
+    // FIXME: These are rare but not impossible. We should output some kind of
+    // warning for things like strcpy((char[]){'a', 0}, "b");
+    return state;
+  }
+
+  if (strLength.isUnknown())
+    return state->remove<CStringLength>(MR);
+
+  return state->set<CStringLength>(MR, strLength);
+}
+
+SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C,
+                                               ProgramStateRef &state,
+                                               const Expr *Ex,
+                                               const MemRegion *MR,
+                                               bool hypothetical) {
+  if (!hypothetical) {
+    // If there's a recorded length, go ahead and return it.
+    const SVal *Recorded = state->get<CStringLength>(MR);
+    if (Recorded)
+      return *Recorded;
+  }
+  
+  // Otherwise, get a new symbol and update the state.
+  SValBuilder &svalBuilder = C.getSValBuilder();
+  QualType sizeTy = svalBuilder.getContext().getSizeType();
+  SVal strLength = svalBuilder.getMetadataSymbolVal(CStringChecker::getTag(),
+                                                    MR, Ex, sizeTy,
+                                                    C.blockCount());
+
+  if (!hypothetical)
+    state = state->set<CStringLength>(MR, strLength);
+
+  return strLength;
+}
+
+SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
+                                      const Expr *Ex, SVal Buf,
+                                      bool hypothetical) const {
+  const MemRegion *MR = Buf.getAsRegion();
+  if (!MR) {
+    // If we can't get a region, see if it's something we /know/ isn't a
+    // C string. In the context of locations, the only time we can issue such
+    // a warning is for labels.
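+    //
+    // Illustrative example (GNU C address-of-label extension, not from the
+    // original source):
+    //
+    //   lbl: ;
+    //   strlen((const char *)&&lbl);   // a label address is never a C string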
+    if (Optional<loc::GotoLabel> Label = Buf.getAs<loc::GotoLabel>()) {
+      if (!Filter.CheckCStringNotNullTerm)
+        return UndefinedVal();
+
+      if (ExplodedNode *N = C.addTransition(state)) {
+        if (!BT_NotCString)
+          BT_NotCString.reset(new BuiltinBug("Unix API",
+            "Argument is not a null-terminated string."));
+
+        SmallString<120> buf;
+        llvm::raw_svector_ostream os(buf);
+        assert(CurrentFunctionDescription);
+        os << "Argument to " << CurrentFunctionDescription
+           << " is the address of the label '" << Label->getLabel()->getName()
+           << "', which is not a null-terminated string";
+
+        // Generate a report for this bug.
+        BugReport *report = new BugReport(*BT_NotCString, os.str(), N);
+
+        report->addRange(Ex->getSourceRange());
+        C.emitReport(report);        
+      }
+      return UndefinedVal();
+
+    }
+
+    // If it's not a region and not a label, give up.
+    return UnknownVal();
+  }
+
+  // If we have a region, strip casts from it and see if we can figure out
+  // its length. For anything we can't figure out, just return UnknownVal.
+  MR = MR->StripCasts();
+
+  switch (MR->getKind()) {
+  case MemRegion::StringRegionKind: {
+    // Modifying the contents of string regions is undefined [C99 6.4.5p6],
+    // so we can assume that the byte length is the correct C string length.
+    SValBuilder &svalBuilder = C.getSValBuilder();
+    QualType sizeTy = svalBuilder.getContext().getSizeType();
+    const StringLiteral *strLit = cast<StringRegion>(MR)->getStringLiteral();
+    return svalBuilder.makeIntVal(strLit->getByteLength(), sizeTy);
+  }
+  case MemRegion::SymbolicRegionKind:
+  case MemRegion::AllocaRegionKind:
+  case MemRegion::VarRegionKind:
+  case MemRegion::FieldRegionKind:
+  case MemRegion::ObjCIvarRegionKind:
+    return getCStringLengthForRegion(C, state, Ex, MR, hypothetical);
+  case MemRegion::CompoundLiteralRegionKind:
+    // FIXME: Can we track this? Is it necessary?
+    return UnknownVal();
+  case MemRegion::ElementRegionKind:
+    // FIXME: How can we handle this? It's not good enough to subtract the
+    // offset from the base string length; consider "123\x00567" and &a[5].
+    return UnknownVal();
+  default:
+    // Other regions (mostly non-data) can't have a reliable C string length.
+    // In this case, an error is emitted and UndefinedVal is returned.
+    // The caller should always be prepared to handle this case.
+    if (!Filter.CheckCStringNotNullTerm)
+      return UndefinedVal();
+
+    if (ExplodedNode *N = C.addTransition(state)) {
+      if (!BT_NotCString)
+        BT_NotCString.reset(new BuiltinBug("Unix API",
+          "Argument is not a null-terminated string."));
+
+      SmallString<120> buf;
+      llvm::raw_svector_ostream os(buf);
+
+      assert(CurrentFunctionDescription);
+      os << "Argument to " << CurrentFunctionDescription << " is ";
+
+      if (SummarizeRegion(os, C.getASTContext(), MR))
+        os << ", which is not a null-terminated string";
+      else
+        os << "not a null-terminated string";
+
+      // Generate a report for this bug.
+      BugReport *report = new BugReport(*BT_NotCString, os.str(), N);
+
+      report->addRange(Ex->getSourceRange());
+      C.emitReport(report);        
+    }
+
+    return UndefinedVal();
+  }
+}
+
+const StringLiteral *CStringChecker::getCStringLiteral(CheckerContext &C,
+  ProgramStateRef &state, const Expr *expr, SVal val) const {
+
+  // Get the memory region pointed to by the val.
+  const MemRegion *bufRegion = val.getAsRegion();
+  if (!bufRegion)
+    return NULL; 
+
+  // Strip casts off the memory region.
+  bufRegion = bufRegion->StripCasts();
+
+  // Cast the memory region to a string region.
+  const StringRegion *strRegion = dyn_cast<StringRegion>(bufRegion);
+  if (!strRegion)
+    return NULL; 
+
+  // Return the actual string in the string region.
+  return strRegion->getStringLiteral();
+}
+
+ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C,
+                                                ProgramStateRef state,
+                                                const Expr *E, SVal V) {
+  Optional<Loc> L = V.getAs<Loc>();
+  if (!L)
+    return state;
+
+  // FIXME: This is a simplified version of what's in CFRefCount.cpp -- it makes
+  // some assumptions about the value that CFRefCount can't. Even so, it should
+  // probably be refactored.
+  if (Optional<loc::MemRegionVal> MR = L->getAs<loc::MemRegionVal>()) {
+    const MemRegion *R = MR->getRegion()->StripCasts();
+
+    // Are we dealing with an ElementRegion?  If so, we should be invalidating
+    // the super-region.
+    if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+      R = ER->getSuperRegion();
+      // FIXME: What about layers of ElementRegions?
+    }
+
+    // Invalidate this region.
+    const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
+    return state->invalidateRegions(R, E, C.blockCount(), LCtx,
+                                    /*CausesPointerEscape*/ false);
+  }
+
+  // If we have a non-region value by chance, just remove the binding.
+  // FIXME: is this necessary or correct? This handles the non-Region
+  //  cases.  Is it ever valid to store to these?
+  return state->killBinding(*L);
+}
+
+bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
+                                     const MemRegion *MR) {
+  const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(MR);
+
+  switch (MR->getKind()) {
+  case MemRegion::FunctionTextRegionKind: {
+    const NamedDecl *FD = cast<FunctionTextRegion>(MR)->getDecl();
+    if (FD)
+      os << "the address of the function '" << *FD << '\'';
+    else
+      os << "the address of a function";
+    return true;
+  }
+  case MemRegion::BlockTextRegionKind:
+    os << "block text";
+    return true;
+  case MemRegion::BlockDataRegionKind:
+    os << "a block";
+    return true;
+  case MemRegion::CXXThisRegionKind:
+  case MemRegion::CXXTempObjectRegionKind:
+    os << "a C++ temp object of type " << TVR->getValueType().getAsString();
+    return true;
+  case MemRegion::VarRegionKind:
+    os << "a variable of type" << TVR->getValueType().getAsString();
+    return true;
+  case MemRegion::FieldRegionKind:
+    os << "a field of type " << TVR->getValueType().getAsString();
+    return true;
+  case MemRegion::ObjCIvarRegionKind:
+    os << "an instance variable of type " << TVR->getValueType().getAsString();
+    return true;
+  default:
+    return false;
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Evaluation of individual function calls.
+//===----------------------------------------------------------------------===//
+
+void CStringChecker::evalCopyCommon(CheckerContext &C, 
+                                    const CallExpr *CE,
+                                    ProgramStateRef state,
+                                    const Expr *Size, const Expr *Dest,
+                                    const Expr *Source, bool Restricted,
+                                    bool IsMempcpy) const {
+  CurrentFunctionDescription = "memory copy function";
+
+  // See if the size argument is zero.
+  const LocationContext *LCtx = C.getLocationContext();
+  SVal sizeVal = state->getSVal(Size, LCtx);
+  QualType sizeTy = Size->getType();
+
+  ProgramStateRef stateZeroSize, stateNonZeroSize;
+  llvm::tie(stateZeroSize, stateNonZeroSize) =
+    assumeZero(C, state, sizeVal, sizeTy);
+
+  // Get the value of the Dest.
+  SVal destVal = state->getSVal(Dest, LCtx);
+
+  // If the size is zero, there won't be any actual memory access, so
+  // just bind the return value to the destination buffer and return.
+  if (stateZeroSize && !stateNonZeroSize) {
+    stateZeroSize = stateZeroSize->BindExpr(CE, LCtx, destVal);
+    C.addTransition(stateZeroSize);
+    return;
+  }
+
+  // If the size can be nonzero, we have to check the other arguments.
+  if (stateNonZeroSize) {
+    state = stateNonZeroSize;
+
+    // Ensure the destination is not null. If it is NULL there will be a
+    // NULL pointer dereference.
+    state = checkNonNull(C, state, Dest, destVal);
+    if (!state)
+      return;
+
+    // Get the value of the Src.
+    SVal srcVal = state->getSVal(Source, LCtx);
+    
+    // Ensure the source is not null. If it is NULL there will be a
+    // NULL pointer dereference.
+    state = checkNonNull(C, state, Source, srcVal);
+    if (!state)
+      return;
+
+    // Ensure the accesses are valid and that the buffers do not overlap.
+    const char * const writeWarning =
+      "Memory copy function overflows destination buffer";
+    state = CheckBufferAccess(C, state, Size, Dest, Source,
+                              writeWarning, /* sourceWarning = */ NULL);
+    if (Restricted)
+      state = CheckOverlap(C, state, Size, Dest, Source);
+
+    if (!state)
+      return;
+
+    // If this is mempcpy, get the byte after the last byte copied and 
+    // bind the expr.
+    if (IsMempcpy) {
+      loc::MemRegionVal destRegVal = destVal.castAs<loc::MemRegionVal>();
+      
+      // Get the length to copy.
+      if (Optional<NonLoc> lenValNonLoc = sizeVal.getAs<NonLoc>()) {
+        // Get the byte after the last byte copied.
+        SVal lastElement = C.getSValBuilder().evalBinOpLN(state, BO_Add, 
+                                                          destRegVal,
+                                                          *lenValNonLoc, 
+                                                          Dest->getType());
+      
+        // The byte after the last byte copied is the return value.
+        state = state->BindExpr(CE, LCtx, lastElement);
+      } else {
+        // If we don't know how much we copied, we can at least
+        // conjure a return value for later.
+        SVal result = C.getSValBuilder().conjureSymbolVal(0, CE, LCtx,
+                                                          C.blockCount());
+        state = state->BindExpr(CE, LCtx, result);
+      }
+
+    } else {
+      // All other copies return the destination buffer.
+      // (Well, bcopy() has a void return type, but this won't hurt.)
+      state = state->BindExpr(CE, LCtx, destVal);
+    }
+
+    // Invalidate the destination.
+    // FIXME: Even if we can't perfectly model the copy, we should see if we
+    // can use LazyCompoundVals to copy the source values into the destination.
+    // This would probably remove any existing bindings past the end of the
+    // copied region, but that's still an improvement over blank invalidation.
+    state = InvalidateBuffer(C, state, Dest,
+                             state->getSVal(Dest, C.getLocationContext()));
+    C.addTransition(state);
+  }
+}
+
+
+void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE) const {
+  if (CE->getNumArgs() < 3)
+    return;
+
+  // void *memcpy(void *restrict dst, const void *restrict src, size_t n);
+  // The return value is the address of the destination buffer.
+  const Expr *Dest = CE->getArg(0);
+  ProgramStateRef state = C.getState();
+
+  evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true);
+}
+
+void CStringChecker::evalMempcpy(CheckerContext &C, const CallExpr *CE) const {
+  if (CE->getNumArgs() < 3)
+    return;
+
+  // void *mempcpy(void *restrict dst, const void *restrict src, size_t n);
+  // The return value is a pointer to the byte following the last written byte.
+  const Expr *Dest = CE->getArg(0);
+  ProgramStateRef state = C.getState();
+  
+  evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true, true);
+}
+
+void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE) const {
+  if (CE->getNumArgs() < 3)
+    return;
+
+  // void *memmove(void *dst, const void *src, size_t n);
+  // The return value is the address of the destination buffer.
+  const Expr *Dest = CE->getArg(0);
+  ProgramStateRef state = C.getState();
+
+  evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1));
+}
+
+void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) const {
+  if (CE->getNumArgs() < 3)
+    return;
+
+  // void bcopy(const void *src, void *dst, size_t n);
+  evalCopyCommon(C, CE, C.getState(), 
+                 CE->getArg(2), CE->getArg(1), CE->getArg(0));
+}
+
+void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
+  if (CE->getNumArgs() < 3)
+    return;
+
+  // int memcmp(const void *s1, const void *s2, size_t n);
+  CurrentFunctionDescription = "memory comparison function";
+
+  const Expr *Left = CE->getArg(0);
+  const Expr *Right = CE->getArg(1);
+  const Expr *Size = CE->getArg(2);
+
+  ProgramStateRef state = C.getState();
+  SValBuilder &svalBuilder = C.getSValBuilder();
+
+  // See if the size argument is zero.
+  const LocationContext *LCtx = C.getLocationContext();
+  SVal sizeVal = state->getSVal(Size, LCtx);
+  QualType sizeTy = Size->getType();
+
+  ProgramStateRef stateZeroSize, stateNonZeroSize;
+  llvm::tie(stateZeroSize, stateNonZeroSize) =
+    assumeZero(C, state, sizeVal, sizeTy);
+
+  // If the size can be zero, the result will be 0 in that case, and we don't
+  // have to check either of the buffers.
+  if (stateZeroSize) {
+    state = stateZeroSize;
+    state = state->BindExpr(CE, LCtx,
+                            svalBuilder.makeZeroVal(CE->getType()));
+    C.addTransition(state);
+  }
+
+  // If the size can be nonzero, we have to check the other arguments.
+  if (stateNonZeroSize) {
+    state = stateNonZeroSize;
+    // If we know the two buffers are the same, we know the result is 0.
+    // First, get the two buffers' addresses. Another checker will have already
+    // made sure they're not undefined.
+    DefinedOrUnknownSVal LV =
+        state->getSVal(Left, LCtx).castAs<DefinedOrUnknownSVal>();
+    DefinedOrUnknownSVal RV =
+        state->getSVal(Right, LCtx).castAs<DefinedOrUnknownSVal>();
+
+    // See if they are the same.
+    DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
+    ProgramStateRef StSameBuf, StNotSameBuf;
+    llvm::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
+
+    // If the two arguments might be the same buffer, we know the result is 0,
+    // and we only need to check one size.
+    if (StSameBuf) {
+      state = StSameBuf;
+      state = CheckBufferAccess(C, state, Size, Left);
+      if (state) {
+        state = StSameBuf->BindExpr(CE, LCtx,
+                                    svalBuilder.makeZeroVal(CE->getType()));
+        C.addTransition(state);
+      }
+    }
+
+    // If the two arguments might be different buffers, we have to check the
+    // size of both of them.
+    if (StNotSameBuf) {
+      state = StNotSameBuf;
+      state = CheckBufferAccess(C, state, Size, Left, Right);
+      if (state) {
+        // The return value is the comparison result, which we don't know.
+        SVal CmpV = svalBuilder.conjureSymbolVal(0, CE, LCtx, C.blockCount());
+        state = state->BindExpr(CE, LCtx, CmpV);
+        C.addTransition(state);
+      }
+    }
+  }
+}
+
+void CStringChecker::evalstrLength(CheckerContext &C,
+                                   const CallExpr *CE) const {
+  if (CE->getNumArgs() < 1)
+    return;
+
+  // size_t strlen(const char *s);
+  evalstrLengthCommon(C, CE, /* IsStrnlen = */ false);
+}
+
+void CStringChecker::evalstrnLength(CheckerContext &C,
+                                    const CallExpr *CE) const {
+  if (CE->getNumArgs() < 2)
+    return;
+
+  // size_t strnlen(const char *s, size_t maxlen);
+  evalstrLengthCommon(C, CE, /* IsStrnlen = */ true);
+}
+
+void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
+                                         bool IsStrnlen) const {
+  CurrentFunctionDescription = "string length function";
+  ProgramStateRef state = C.getState();
+  const LocationContext *LCtx = C.getLocationContext();
+
+  if (IsStrnlen) {
+    const Expr *maxlenExpr = CE->getArg(1);
+    SVal maxlenVal = state->getSVal(maxlenExpr, LCtx);
+
+    ProgramStateRef stateZeroSize, stateNonZeroSize;
+    llvm::tie(stateZeroSize, stateNonZeroSize) =
+      assumeZero(C, state, maxlenVal, maxlenExpr->getType());
+
+    // If the size can be zero, the result will be 0 in that case, and we don't
+    // have to check the string itself.
+    if (stateZeroSize) {
+      SVal zero = C.getSValBuilder().makeZeroVal(CE->getType());
+      stateZeroSize = stateZeroSize->BindExpr(CE, LCtx, zero);
+      C.addTransition(stateZeroSize);
+    }
+
+    // If the size is GUARANTEED to be zero, we're done!
+    if (!stateNonZeroSize)
+      return;
+
+    // Otherwise, record the assumption that the size is nonzero.
+    state = stateNonZeroSize;
+  }
+
+  // Check that the string argument is non-null.
+  const Expr *Arg = CE->getArg(0);
+  SVal ArgVal = state->getSVal(Arg, LCtx);
+
+  state = checkNonNull(C, state, Arg, ArgVal);
+
+  if (!state)
+    return;
+
+  SVal strLength = getCStringLength(C, state, Arg, ArgVal);
+
+  // If the argument isn't a valid C string, there's no valid state to
+  // transition to.
+  if (strLength.isUndef())
+    return;
+
+  DefinedOrUnknownSVal result = UnknownVal();
+
+  // If the check is for strnlen() then bind the return value to no more than
+  // the maxlen value.
+  if (IsStrnlen) {
+    QualType cmpTy = C.getSValBuilder().getConditionType();
+
+    // It's a little unfortunate to be getting this again,
+    // but it's not that expensive...
+    const Expr *maxlenExpr = CE->getArg(1);
+    SVal maxlenVal = state->getSVal(maxlenExpr, LCtx);
+
+    Optional<NonLoc> strLengthNL = strLength.getAs<NonLoc>();
+    Optional<NonLoc> maxlenValNL = maxlenVal.getAs<NonLoc>();
+
+    if (strLengthNL && maxlenValNL) {
+      ProgramStateRef stateStringTooLong, stateStringNotTooLong;
+
+      // Check if the strLength is greater than the maxlen.
+      llvm::tie(stateStringTooLong, stateStringNotTooLong) =
+          state->assume(C.getSValBuilder().evalBinOpNN(
+              state, BO_GT, *strLengthNL, *maxlenValNL, cmpTy)
+                            .castAs<DefinedOrUnknownSVal>());
+
+      if (stateStringTooLong && !stateStringNotTooLong) {
+        // If the string is longer than maxlen, return maxlen.
+        result = *maxlenValNL;
+      } else if (stateStringNotTooLong && !stateStringTooLong) {
+        // If the string is shorter than maxlen, return its length.
+        result = *strLengthNL;
+      }
+    }
+
+    if (result.isUnknown()) {
+      // If we don't have enough information for a comparison, there's
+      // no guarantee the full string length will actually be returned.
+      // All we know is the return value is the min of the string length
+      // and the limit. This is better than nothing.
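+      //
+      // Illustrative example (names invented): for 'strnlen(s, cap)' with an
+      // unknown strlen(s), the conjured value below is only constrained to
+      // satisfy 'result <= strlen(s)' and 'result <= cap'.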
+      result = C.getSValBuilder().conjureSymbolVal(0, CE, LCtx, C.blockCount());
+      NonLoc resultNL = result.castAs<NonLoc>();
+
+      if (strLengthNL) {
+        state = state->assume(C.getSValBuilder().evalBinOpNN(
+                                  state, BO_LE, resultNL, *strLengthNL, cmpTy)
+                                  .castAs<DefinedOrUnknownSVal>(), true);
+      }
+      
+      if (maxlenValNL) {
+        state = state->assume(C.getSValBuilder().evalBinOpNN(
+                                  state, BO_LE, resultNL, *maxlenValNL, cmpTy)
+                                  .castAs<DefinedOrUnknownSVal>(), true);
+      }
+    }
+
+  } else {
+    // This is a plain strlen(), not strnlen().
+    result = strLength.castAs<DefinedOrUnknownSVal>();
+
+    // If we don't know the length of the string, conjure a return
+    // value, so it can be used in constraints, at least.
+    if (result.isUnknown()) {
+      result = C.getSValBuilder().conjureSymbolVal(0, CE, LCtx, C.blockCount());
+    }
+  }
+
+  // Bind the return value.
+  assert(!result.isUnknown() && "Should have conjured a value by now");
+  state = state->BindExpr(CE, LCtx, result);
+  C.addTransition(state);
+}
+
+void CStringChecker::evalStrcpy(CheckerContext &C, const CallExpr *CE) const {
+  if (CE->getNumArgs() < 2)
+    return;
+
+  // char *strcpy(char *restrict dst, const char *restrict src);
+  evalStrcpyCommon(C, CE, 
+                   /* returnEnd = */ false, 
+                   /* isBounded = */ false,
+                   /* isAppending = */ false);
+}
+
+void CStringChecker::evalStrncpy(CheckerContext &C, const CallExpr *CE) const {
+  if (CE->getNumArgs() < 3)
+    return;
+
+  // char *strncpy(char *restrict dst, const char *restrict src, size_t n);
+  evalStrcpyCommon(C, CE, 
+                   /* returnEnd = */ false, 
+                   /* isBounded = */ true,
+                   /* isAppending = */ false);
+}
+
+void CStringChecker::evalStpcpy(CheckerContext &C, const CallExpr *CE) const {
+  if (CE->getNumArgs() < 2)
+    return;
+
+  // char *stpcpy(char *restrict dst, const char *restrict src);
+  evalStrcpyCommon(C, CE, 
+                   /* returnEnd = */ true, 
+                   /* isBounded = */ false,
+                   /* isAppending = */ false);
+}
+
+void CStringChecker::evalStrcat(CheckerContext &C, const CallExpr *CE) const {
+  if (CE->getNumArgs() < 2)
+    return;
+
+  // char *strcat(char *restrict s1, const char *restrict s2);
+  evalStrcpyCommon(C, CE, 
+                   /* returnEnd = */ false, 
+                   /* isBounded = */ false,
+                   /* isAppending = */ true);
+}
+
+void CStringChecker::evalStrncat(CheckerContext &C, const CallExpr *CE) const {
+  if (CE->getNumArgs() < 3)
+    return;
+
+  // char *strncat(char *restrict s1, const char *restrict s2, size_t n);
+  evalStrcpyCommon(C, CE, 
+                   /* returnEnd = */ false, 
+                   /* isBounded = */ true,
+                   /* isAppending = */ true);
+}
+
+void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
+                                      bool returnEnd, bool isBounded,
+                                      bool isAppending) const {
+  CurrentFunctionDescription = "string copy function";
+  ProgramStateRef state = C.getState();
+  const LocationContext *LCtx = C.getLocationContext();
+
+  // Check that the destination is non-null.
+  const Expr *Dst = CE->getArg(0);
+  SVal DstVal = state->getSVal(Dst, LCtx);
+
+  state = checkNonNull(C, state, Dst, DstVal);
+  if (!state)
+    return;
+
+  // Check that the source is non-null.
+  const Expr *srcExpr = CE->getArg(1);
+  SVal srcVal = state->getSVal(srcExpr, LCtx);
+  state = checkNonNull(C, state, srcExpr, srcVal);
+  if (!state)
+    return;
+
+  // Get the string length of the source.
+  SVal strLength = getCStringLength(C, state, srcExpr, srcVal);
+
+  // If the source isn't a valid C string, give up.
+  if (strLength.isUndef())
+    return;
+
+  SValBuilder &svalBuilder = C.getSValBuilder();
+  QualType cmpTy = svalBuilder.getConditionType();
+  QualType sizeTy = svalBuilder.getContext().getSizeType();
+
+  // These two values allow checking two kinds of errors:
+  // - actual overflows caused by a source that doesn't fit in the destination
+  // - potential overflows caused by a bound that could exceed the destination
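+  // For example (illustrative): strncpy(buf, src, n) actually overflows buf
+  // when the source does not fit, and is still suspicious when n alone exceeds
+  // the size of buf, even if the copy would stop earlier.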
+  SVal amountCopied = UnknownVal();
+  SVal maxLastElementIndex = UnknownVal();
+  const char *boundWarning = NULL;
+
+  // If the function is strncpy, strncat, etc... it is bounded.
+  if (isBounded) {
+    // Get the max number of characters to copy.
+    const Expr *lenExpr = CE->getArg(2);
+    SVal lenVal = state->getSVal(lenExpr, LCtx);
+
+    // Protect against misdeclared strncpy().
+    lenVal = svalBuilder.evalCast(lenVal, sizeTy, lenExpr->getType());
+
+    Optional<NonLoc> strLengthNL = strLength.getAs<NonLoc>();
+    Optional<NonLoc> lenValNL = lenVal.getAs<NonLoc>();
+
+    // If we know both values, we might be able to figure out how much
+    // we're copying.
+    if (strLengthNL && lenValNL) {
+      ProgramStateRef stateSourceTooLong, stateSourceNotTooLong;
+
+      // Check if the max number to copy is less than the length of the src.
+      // If the bound is equal to the source length, strncpy won't null-
+      // terminate the result!
+      llvm::tie(stateSourceTooLong, stateSourceNotTooLong) = state->assume(
+          svalBuilder.evalBinOpNN(state, BO_GE, *strLengthNL, *lenValNL, cmpTy)
+              .castAs<DefinedOrUnknownSVal>());
+
+      if (stateSourceTooLong && !stateSourceNotTooLong) {
+        // Max number to copy is less than the length of the src, so the actual
+        // strLength copied is the max number arg.
+        state = stateSourceTooLong;
+        amountCopied = lenVal;
+
+      } else if (!stateSourceTooLong && stateSourceNotTooLong) {
+        // The source buffer entirely fits in the bound.
+        state = stateSourceNotTooLong;
+        amountCopied = strLength;
+      }
+    }
+
+    // We still want to know if the bound is known to be too large.
+    if (lenValNL) {
+      if (isAppending) {
+        // For strncat, the check is strlen(dst) + lenVal < sizeof(dst)
+
+        // Get the string length of the destination. If the destination is
+        // memory that can't have a string length, we shouldn't be copying
+        // into it anyway.
+        SVal dstStrLength = getCStringLength(C, state, Dst, DstVal);
+        if (dstStrLength.isUndef())
+          return;
+
+        if (Optional<NonLoc> dstStrLengthNL = dstStrLength.getAs<NonLoc>()) {
+          maxLastElementIndex = svalBuilder.evalBinOpNN(state, BO_Add,
+                                                        *lenValNL,
+                                                        *dstStrLengthNL,
+                                                        sizeTy);
+          boundWarning = "Size argument is greater than the free space in the "
+                         "destination buffer";
+        }
+
+      } else {
+        // For strncpy, this is just checking that lenVal <= sizeof(dst)
+        // (Yes, strncpy and strncat differ in how they treat termination.
+        // strncat ALWAYS terminates, but strncpy doesn't.)
+
+        // We need a special case for when the copy size is zero, in which
+        // case strncpy will do no work at all. Our bounds check uses n-1
+        // as the last element accessed, so n == 0 is problematic.
+        ProgramStateRef StateZeroSize, StateNonZeroSize;
+        llvm::tie(StateZeroSize, StateNonZeroSize) =
+          assumeZero(C, state, *lenValNL, sizeTy);
+
+        // If the size is known to be zero, we're done.
+        if (StateZeroSize && !StateNonZeroSize) {
+          StateZeroSize = StateZeroSize->BindExpr(CE, LCtx, DstVal);
+          C.addTransition(StateZeroSize);
+          return;
+        }
+
+        // Otherwise, go ahead and figure out the last element we'll touch.
+        // We don't record the non-zero assumption here because we can't
+        // be sure. We won't warn on a possible zero.
+        NonLoc one = svalBuilder.makeIntVal(1, sizeTy).castAs<NonLoc>();
+        maxLastElementIndex = svalBuilder.evalBinOpNN(state, BO_Sub, *lenValNL,
+                                                      one, sizeTy);
+        boundWarning = "Size argument is greater than the length of the "
+                       "destination buffer";
+      }
+    }
+
+    // If we couldn't pin down the copy length, at least bound it.
+    // FIXME: We should actually run this code path for append as well, but
+    // right now it creates problems with constraints (since we can end up
+    // trying to pass constraints from symbol to symbol).
+    if (amountCopied.isUnknown() && !isAppending) {
+      // Try to get a "hypothetical" string length symbol, which we can later
+      // set as a real value if that turns out to be the case.
+      amountCopied = getCStringLength(C, state, lenExpr, srcVal, true);
+      assert(!amountCopied.isUndef());
+
+      if (Optional<NonLoc> amountCopiedNL = amountCopied.getAs<NonLoc>()) {
+        if (lenValNL) {
+          // amountCopied <= lenVal
+          SVal copiedLessThanBound = svalBuilder.evalBinOpNN(state, BO_LE,
+                                                             *amountCopiedNL,
+                                                             *lenValNL,
+                                                             cmpTy);
+          state = state->assume(
+              copiedLessThanBound.castAs<DefinedOrUnknownSVal>(), true);
+          if (!state)
+            return;
+        }
+
+        if (strLengthNL) {
+          // amountCopied <= strlen(source)
+          SVal copiedLessThanSrc = svalBuilder.evalBinOpNN(state, BO_LE,
+                                                           *amountCopiedNL,
+                                                           *strLengthNL,
+                                                           cmpTy);
+          state = state->assume(
+              copiedLessThanSrc.castAs<DefinedOrUnknownSVal>(), true);
+          if (!state)
+            return;
+        }
+      }
+    }
+
+  } else {
+    // The function isn't bounded. The amount copied should match the length
+    // of the source buffer.
+    amountCopied = strLength;
+  }
+
+  assert(state);
+
+  // This represents the number of characters copied into the destination
+  // buffer. (It may not actually be the strlen if the destination buffer
+  // is not terminated.)
+  SVal finalStrLength = UnknownVal();
+
+  // If this is an appending function (strcat, strncat...) then set the
+  // string length to strlen(src) + strlen(dst) since the buffer will
+  // ultimately contain both.
+  if (isAppending) {
+    // Get the string length of the destination. If the destination is memory
+    // that can't have a string length, we shouldn't be copying into it anyway.
+    SVal dstStrLength = getCStringLength(C, state, Dst, DstVal);
+    if (dstStrLength.isUndef())
+      return;
+
+    Optional<NonLoc> srcStrLengthNL = amountCopied.getAs<NonLoc>();
+    Optional<NonLoc> dstStrLengthNL = dstStrLength.getAs<NonLoc>();
+    
+    // If we know both string lengths, we might know the final string length.
+    if (srcStrLengthNL && dstStrLengthNL) {
+      // Make sure the two lengths together don't overflow a size_t.
+      state = checkAdditionOverflow(C, state, *srcStrLengthNL, *dstStrLengthNL);
+      if (!state)
+        return;
+
+      finalStrLength = svalBuilder.evalBinOpNN(state, BO_Add, *srcStrLengthNL, 
+                                               *dstStrLengthNL, sizeTy);
+    }
+
+    // If we couldn't get a single value for the final string length,
+    // we can at least bound it by the individual lengths.
+    if (finalStrLength.isUnknown()) {
+      // Try to get a "hypothetical" string length symbol, which we can later
+      // set as a real value if that turns out to be the case.
+      finalStrLength = getCStringLength(C, state, CE, DstVal, true);
+      assert(!finalStrLength.isUndef());
+
+      if (Optional<NonLoc> finalStrLengthNL = finalStrLength.getAs<NonLoc>()) {
+        if (srcStrLengthNL) {
+          // finalStrLength >= srcStrLength
+          SVal sourceInResult = svalBuilder.evalBinOpNN(state, BO_GE,
+                                                        *finalStrLengthNL,
+                                                        *srcStrLengthNL,
+                                                        cmpTy);
+          state = state->assume(sourceInResult.castAs<DefinedOrUnknownSVal>(),
+                                true);
+          if (!state)
+            return;
+        }
+
+        if (dstStrLengthNL) {
+          // finalStrLength >= dstStrLength
+          SVal destInResult = svalBuilder.evalBinOpNN(state, BO_GE,
+                                                      *finalStrLengthNL,
+                                                      *dstStrLengthNL,
+                                                      cmpTy);
+          state =
+              state->assume(destInResult.castAs<DefinedOrUnknownSVal>(), true);
+          if (!state)
+            return;
+        }
+      }
+    }
+
+  } else {
+    // Otherwise, this is a copy-over function (strcpy, strncpy, ...), and
+    // the final string length will match the input string length.
+    finalStrLength = amountCopied;
+  }
+
+  // The final result of the function will either be a pointer past the last
+  // copied element, or a pointer to the start of the destination buffer.
+  SVal Result = (returnEnd ? UnknownVal() : DstVal);
+
+  assert(state);
+
+  // If the destination is a MemRegion, try to check for a buffer overflow and
+  // record the new string length.
+  if (Optional<loc::MemRegionVal> dstRegVal =
+          DstVal.getAs<loc::MemRegionVal>()) {
+    QualType ptrTy = Dst->getType();
+
+    // If we have an exact value on a bounded copy, use that to check for
+    // overflows, rather than our estimate about how much is actually copied.
+    if (boundWarning) {
+      if (Optional<NonLoc> maxLastNL = maxLastElementIndex.getAs<NonLoc>()) {
+        SVal maxLastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
+                                                      *maxLastNL, ptrTy);
+        state = CheckLocation(C, state, CE->getArg(2), maxLastElement, 
+                              boundWarning);
+        if (!state)
+          return;
+      }
+    }
+
+    // Then, if the final length is known...
+    if (Optional<NonLoc> knownStrLength = finalStrLength.getAs<NonLoc>()) {
+      SVal lastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
+                                                 *knownStrLength, ptrTy);
+
+      // ...and we haven't checked the bound, we'll check the actual copy.
+      if (!boundWarning) {
+        const char * const warningMsg =
+          "String copy function overflows destination buffer";
+        state = CheckLocation(C, state, Dst, lastElement, warningMsg);
+        if (!state)
+          return;
+      }
+
+      // If this is a stpcpy-style copy, the last element is the return value.
+      if (returnEnd)
+        Result = lastElement;
+    }
+
+    // Invalidate the destination. This must happen before we set the C string
+    // length because invalidation will clear the length.
+    // FIXME: Even if we can't perfectly model the copy, we should see if we
+    // can use LazyCompoundVals to copy the source values into the destination.
+    // This would probably remove any existing bindings past the end of the
+    // string, but that's still an improvement over blank invalidation.
+    state = InvalidateBuffer(C, state, Dst, *dstRegVal);
+
+    // Set the C string length of the destination, if we know it.
+    if (isBounded && !isAppending) {
+      // strncpy is annoying in that it doesn't guarantee to null-terminate
+      // the result string. If the original string didn't fit entirely inside
+      // the bound (including the null-terminator), we don't know how long the
+      // result is.
+      if (amountCopied != strLength)
+        finalStrLength = UnknownVal();
+    }
+    state = setCStringLength(state, dstRegVal->getRegion(), finalStrLength);
+  }
+
+  assert(state);
+
+  // If this is a stpcpy-style copy, but we were unable to check for a buffer
+  // overflow, we still need a result. Conjure a return value.
+  if (returnEnd && Result.isUnknown()) {
+    Result = svalBuilder.conjureSymbolVal(0, CE, LCtx, C.blockCount());
+  }
+
+  // Set the return value.
+  state = state->BindExpr(CE, LCtx, Result);
+  C.addTransition(state);
+}
+
+void CStringChecker::evalStrcmp(CheckerContext &C, const CallExpr *CE) const {
+  if (CE->getNumArgs() < 2)
+    return;
+
+  // int strcmp(const char *s1, const char *s2);
+  evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ false);
+}
+
+void CStringChecker::evalStrncmp(CheckerContext &C, const CallExpr *CE) const {
+  if (CE->getNumArgs() < 3)
+    return;
+
+  // int strncmp(const char *s1, const char *s2, size_t n);
+  evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ false);
+}
+
+void CStringChecker::evalStrcasecmp(CheckerContext &C, 
+                                    const CallExpr *CE) const {
+  if (CE->getNumArgs() < 2)
+    return;
+
+  // int strcasecmp(const char *s1, const char *s2);
+  evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ true);
+}
+
+void CStringChecker::evalStrncasecmp(CheckerContext &C, 
+                                     const CallExpr *CE) const {
+  if (CE->getNumArgs() < 3)
+    return;
+
+  // int strncasecmp(const char *s1, const char *s2, size_t n);
+  evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ true);
+}
+
+void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
+                                      bool isBounded, bool ignoreCase) const {
+  CurrentFunctionDescription = "string comparison function";
+  ProgramStateRef state = C.getState();
+  const LocationContext *LCtx = C.getLocationContext();
+
+  // Check that the first string is non-null
+  const Expr *s1 = CE->getArg(0);
+  SVal s1Val = state->getSVal(s1, LCtx);
+  state = checkNonNull(C, state, s1, s1Val);
+  if (!state)
+    return;
+
+  // Check that the second string is non-null.
+  const Expr *s2 = CE->getArg(1);
+  SVal s2Val = state->getSVal(s2, LCtx);
+  state = checkNonNull(C, state, s2, s2Val);
+  if (!state)
+    return;
+
+  // Get the string length of the first string or give up.
+  SVal s1Length = getCStringLength(C, state, s1, s1Val);
+  if (s1Length.isUndef())
+    return;
+
+  // Get the string length of the second string or give up.
+  SVal s2Length = getCStringLength(C, state, s2, s2Val);
+  if (s2Length.isUndef())
+    return;
+
+  // If we know the two buffers are the same, we know the result is 0.
+  // First, get the two buffers' addresses. Another checker will have already
+  // made sure they're not undefined.
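+  // For example (illustrative), strcmp(p, p) or strncmp(s, s, n) is resolved
+  // to 0 here without inspecting the buffer contents at all.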
+  DefinedOrUnknownSVal LV = s1Val.castAs<DefinedOrUnknownSVal>();
+  DefinedOrUnknownSVal RV = s2Val.castAs<DefinedOrUnknownSVal>();
+
+  // See if they are the same.
+  SValBuilder &svalBuilder = C.getSValBuilder();
+  DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
+  ProgramStateRef StSameBuf, StNotSameBuf;
+  llvm::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
+
+  // If the two arguments might be the same buffer, we know the result is 0,
+  // and we only need to check one size.
+  if (StSameBuf) {
+    StSameBuf = StSameBuf->BindExpr(CE, LCtx,
+                                    svalBuilder.makeZeroVal(CE->getType()));
+    C.addTransition(StSameBuf);
+
+    // If the two arguments are GUARANTEED to be the same, we're done!
+    if (!StNotSameBuf)
+      return;
+  }
+
+  assert(StNotSameBuf);
+  state = StNotSameBuf;
+
+  // At this point we can go about comparing the two buffers.
+  // For now, we only do this if they're both known string literals.
+
+  // Attempt to extract string literals from both expressions.
+  const StringLiteral *s1StrLiteral = getCStringLiteral(C, state, s1, s1Val);
+  const StringLiteral *s2StrLiteral = getCStringLiteral(C, state, s2, s2Val);
+  bool canComputeResult = false;
+
+  if (s1StrLiteral && s2StrLiteral) {
+    StringRef s1StrRef = s1StrLiteral->getString();
+    StringRef s2StrRef = s2StrLiteral->getString();
+
+    if (isBounded) {
+      // Get the max number of characters to compare.
+      const Expr *lenExpr = CE->getArg(2);
+      SVal lenVal = state->getSVal(lenExpr, LCtx);
+
+      // If the length is known, we can get the right substrings.
+      if (const llvm::APSInt *len = svalBuilder.getKnownValue(state, lenVal)) {
+        // Create substrings of each to compare the prefix.
+        s1StrRef = s1StrRef.substr(0, (size_t)len->getZExtValue());
+        s2StrRef = s2StrRef.substr(0, (size_t)len->getZExtValue());
+        canComputeResult = true;
+      }
+    } else {
+      // This is a normal, unbounded strcmp.
+      canComputeResult = true;
+    }
+
+    if (canComputeResult) {
+      // Real strcmp stops at null characters.
+      size_t s1Term = s1StrRef.find('\0');
+      if (s1Term != StringRef::npos)
+        s1StrRef = s1StrRef.substr(0, s1Term);
+
+      size_t s2Term = s2StrRef.find('\0');
+      if (s2Term != StringRef::npos)
+        s2StrRef = s2StrRef.substr(0, s2Term);
+
+      // Use StringRef's comparison methods to compute the actual result.
+      int result;
+
+      if (ignoreCase) {
+        // Compare string 1 to string 2 the same way strcasecmp() does.
+        result = s1StrRef.compare_lower(s2StrRef);
+      } else {
+        // Compare string 1 to string 2 the same way strcmp() does.
+        result = s1StrRef.compare(s2StrRef);
+      }
+
+      // Build the SVal of the comparison and bind the return value.
+      SVal resultVal = svalBuilder.makeIntVal(result, CE->getType());
+      state = state->BindExpr(CE, LCtx, resultVal);
+    }
+  }
+
+  if (!canComputeResult) {
+    // Conjure a symbolic value. It's the best we can do.
+    SVal resultVal = svalBuilder.conjureSymbolVal(0, CE, LCtx, C.blockCount());
+    state = state->BindExpr(CE, LCtx, resultVal);
+  }
+
+  // Record this as a possible path.
+  C.addTransition(state);
+}
+
+void CStringChecker::evalStrsep(CheckerContext &C, const CallExpr *CE) const {
+  // char *strsep(char **stringp, const char *delim);
+  if (CE->getNumArgs() < 2)
+    return;
+
+  // Sanity: does the search string parameter match the return type?
+  const Expr *SearchStrPtr = CE->getArg(0);
+  QualType CharPtrTy = SearchStrPtr->getType()->getPointeeType();
+  if (CharPtrTy.isNull() ||
+      CE->getType().getUnqualifiedType() != CharPtrTy.getUnqualifiedType())
+    return;
+
+  CurrentFunctionDescription = "strsep()";
+  ProgramStateRef State = C.getState();
+  const LocationContext *LCtx = C.getLocationContext();
+
+  // Check that the search string pointer is non-null (though it may point to
+  // a null string).
+  SVal SearchStrVal = State->getSVal(SearchStrPtr, LCtx);
+  State = checkNonNull(C, State, SearchStrPtr, SearchStrVal);
+  if (!State)
+    return;
+
+  // Check that the delimiter string is non-null.
+  const Expr *DelimStr = CE->getArg(1);
+  SVal DelimStrVal = State->getSVal(DelimStr, LCtx);
+  State = checkNonNull(C, State, DelimStr, DelimStrVal);
+  if (!State)
+    return;
+
+  SValBuilder &SVB = C.getSValBuilder();
+  SVal Result;
+  if (Optional<Loc> SearchStrLoc = SearchStrVal.getAs<Loc>()) {
+    // Get the current value of the search string pointer, as a char*.
+    Result = State->getSVal(*SearchStrLoc, CharPtrTy);
+
+    // Invalidate the search string, representing the change of one delimiter
+    // character to NUL.
+    State = InvalidateBuffer(C, State, SearchStrPtr, Result);
+
+    // Overwrite the search string pointer. The new value is either an address
+    // further along in the same string, or NULL if there are no more tokens.
+    State = State->bindLoc(*SearchStrLoc,
+                           SVB.conjureSymbolVal(getTag(), CE, LCtx, CharPtrTy,
+                                                C.blockCount()));
+  } else {
+    assert(SearchStrVal.isUnknown());
+    // Conjure a symbolic value. It's the best we can do.
+    Result = SVB.conjureSymbolVal(0, CE, LCtx, C.blockCount());
+  }
+
+  // Set the return value, and finish.
+  State = State->BindExpr(CE, LCtx, Result);
+  C.addTransition(State);
+}
+
+
+//===----------------------------------------------------------------------===//
+// The driver method, and other Checker callbacks.
+//===----------------------------------------------------------------------===//
+
+bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+  const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+
+  if (!FDecl)
+    return false;
+
+  // FIXME: Poorly-factored string switches are slow.
+  FnCheck evalFunction = 0;
+  if (C.isCLibraryFunction(FDecl, "memcpy"))
+    evalFunction =  &CStringChecker::evalMemcpy;
+  else if (C.isCLibraryFunction(FDecl, "mempcpy"))
+    evalFunction =  &CStringChecker::evalMempcpy;
+  else if (C.isCLibraryFunction(FDecl, "memcmp"))
+    evalFunction =  &CStringChecker::evalMemcmp;
+  else if (C.isCLibraryFunction(FDecl, "memmove"))
+    evalFunction =  &CStringChecker::evalMemmove;
+  else if (C.isCLibraryFunction(FDecl, "strcpy"))
+    evalFunction =  &CStringChecker::evalStrcpy;
+  else if (C.isCLibraryFunction(FDecl, "strncpy"))
+    evalFunction =  &CStringChecker::evalStrncpy;
+  else if (C.isCLibraryFunction(FDecl, "stpcpy"))
+    evalFunction =  &CStringChecker::evalStpcpy;
+  else if (C.isCLibraryFunction(FDecl, "strcat"))
+    evalFunction =  &CStringChecker::evalStrcat;
+  else if (C.isCLibraryFunction(FDecl, "strncat"))
+    evalFunction =  &CStringChecker::evalStrncat;
+  else if (C.isCLibraryFunction(FDecl, "strlen"))
+    evalFunction =  &CStringChecker::evalstrLength;
+  else if (C.isCLibraryFunction(FDecl, "strnlen"))
+    evalFunction =  &CStringChecker::evalstrnLength;
+  else if (C.isCLibraryFunction(FDecl, "strcmp"))
+    evalFunction =  &CStringChecker::evalStrcmp;
+  else if (C.isCLibraryFunction(FDecl, "strncmp"))
+    evalFunction =  &CStringChecker::evalStrncmp;
+  else if (C.isCLibraryFunction(FDecl, "strcasecmp"))
+    evalFunction =  &CStringChecker::evalStrcasecmp;
+  else if (C.isCLibraryFunction(FDecl, "strncasecmp"))
+    evalFunction =  &CStringChecker::evalStrncasecmp;
+  else if (C.isCLibraryFunction(FDecl, "strsep"))
+    evalFunction =  &CStringChecker::evalStrsep;
+  else if (C.isCLibraryFunction(FDecl, "bcopy"))
+    evalFunction =  &CStringChecker::evalBcopy;
+  else if (C.isCLibraryFunction(FDecl, "bcmp"))
+    evalFunction =  &CStringChecker::evalMemcmp;
+  
+  // If the callee isn't a string function, let another checker handle it.
+  if (!evalFunction)
+    return false;
+
+  // Make sure each function sets its own description.
+  // (But don't bother in a release build.)
+  assert(!(CurrentFunctionDescription = NULL));
+
+  // Check and evaluate the call.
+  (this->*evalFunction)(C, CE);
+
+  // If evaluating the call resulted in no change, chain to the next eval call
+  // handler.
+  // Note: the custom CString evaluation calls assume that basic safety
+  // properties hold. However, if the user chooses to turn off some of these
+  // checks, we ignore the issues and leave the call evaluation to a generic
+  // handler.
+  if (!C.isDifferent())
+    return false;
+
+  return true;
+}
+
+void CStringChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
+  // Record string length for char a[] = "abc";
+  ProgramStateRef state = C.getState();
+
+  for (DeclStmt::const_decl_iterator I = DS->decl_begin(), E = DS->decl_end();
+       I != E; ++I) {
+    const VarDecl *D = dyn_cast<VarDecl>(*I);
+    if (!D)
+      continue;
+
+    // FIXME: Handle array fields of structs.
+    if (!D->getType()->isArrayType())
+      continue;
+
+    const Expr *Init = D->getInit();
+    if (!Init)
+      continue;
+    if (!isa<StringLiteral>(Init))
+      continue;
+
+    Loc VarLoc = state->getLValue(D, C.getLocationContext());
+    const MemRegion *MR = VarLoc.getAsRegion();
+    if (!MR)
+      continue;
+
+    SVal StrVal = state->getSVal(Init, C.getLocationContext());
+    assert(StrVal.isValid() && "Initializer string is unknown or undefined");
+    DefinedOrUnknownSVal strLength =
+        getCStringLength(C, state, Init, StrVal).castAs<DefinedOrUnknownSVal>();
+
+    state = state->set<CStringLength>(MR, strLength);
+  }
+
+  C.addTransition(state);
+}
+
+bool CStringChecker::wantsRegionChangeUpdate(ProgramStateRef state) const {
+  CStringLengthTy Entries = state->get<CStringLength>();
+  return !Entries.isEmpty();
+}
+
+ProgramStateRef 
+CStringChecker::checkRegionChanges(ProgramStateRef state,
+                                   const InvalidatedSymbols *,
+                                   ArrayRef<const MemRegion *> ExplicitRegions,
+                                   ArrayRef<const MemRegion *> Regions,
+                                   const CallEvent *Call) const {
+  CStringLengthTy Entries = state->get<CStringLength>();
+  if (Entries.isEmpty())
+    return state;
+
+  llvm::SmallPtrSet<const MemRegion *, 8> Invalidated;
+  llvm::SmallPtrSet<const MemRegion *, 32> SuperRegions;
+
+  // First build sets for the changed regions and their super-regions.
+  for (ArrayRef<const MemRegion *>::iterator
+       I = Regions.begin(), E = Regions.end(); I != E; ++I) {
+    const MemRegion *MR = *I;
+    Invalidated.insert(MR);
+
+    SuperRegions.insert(MR);
+    while (const SubRegion *SR = dyn_cast<SubRegion>(MR)) {
+      MR = SR->getSuperRegion();
+      SuperRegions.insert(MR);
+    }
+  }
+
+  CStringLengthTy::Factory &F = state->get_context<CStringLength>();
+
+  // Then loop over the entries in the current state.
+  for (CStringLengthTy::iterator I = Entries.begin(),
+       E = Entries.end(); I != E; ++I) {
+    const MemRegion *MR = I.getKey();
+
+    // Is this entry for a super-region of a changed region?
+    if (SuperRegions.count(MR)) {
+      Entries = F.remove(Entries, MR);
+      continue;
+    }
+
+    // Is this entry for a sub-region of a changed region?
+    const MemRegion *Super = MR;
+    while (const SubRegion *SR = dyn_cast<SubRegion>(Super)) {
+      Super = SR->getSuperRegion();
+      if (Invalidated.count(Super)) {
+        Entries = F.remove(Entries, MR);
+        break;
+      }
+    }
+  }
+
+  return state->set<CStringLength>(Entries);
+}
+
+void CStringChecker::checkLiveSymbols(ProgramStateRef state,
+                                      SymbolReaper &SR) const {
+  // Mark all symbols in our string length map as valid.
+  CStringLengthTy Entries = state->get<CStringLength>();
+
+  for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end();
+       I != E; ++I) {
+    SVal Len = I.getData();
+
+    for (SymExpr::symbol_iterator si = Len.symbol_begin(),
+                                  se = Len.symbol_end(); si != se; ++si)
+      SR.markInUse(*si);
+  }
+}
+
+void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
+                                      CheckerContext &C) const {
+  if (!SR.hasDeadSymbols())
+    return;
+
+  ProgramStateRef state = C.getState();
+  CStringLengthTy Entries = state->get<CStringLength>();
+  if (Entries.isEmpty())
+    return;
+
+  CStringLengthTy::Factory &F = state->get_context<CStringLength>();
+  for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end();
+       I != E; ++I) {
+    SVal Len = I.getData();
+    if (SymbolRef Sym = Len.getAsSymbol()) {
+      if (SR.isDead(Sym))
+        Entries = F.remove(Entries, I.getKey());
+    }
+  }
+
+  state = state->set<CStringLength>(Entries);
+  C.addTransition(state);
+}
+
+#define REGISTER_CHECKER(name) \
+void ento::register##name(CheckerManager &mgr) {\
+  static CStringChecker *TheChecker = 0; \
+  if (TheChecker == 0) \
+    TheChecker = mgr.registerChecker<CStringChecker>(); \
+  TheChecker->Filter.Check##name = true; \
+}
+
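+// For example, REGISTER_CHECKER(CStringNullArg) defines
+// ento::registerCStringNullArg(), which lazily creates the single shared
+// CStringChecker instance and enables only its null-argument checks.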
+REGISTER_CHECKER(CStringNullArg)
+REGISTER_CHECKER(CStringOutOfBounds)
+REGISTER_CHECKER(CStringBufferOverlap)
+REGISTER_CHECKER(CStringNotNullTerm)
+
+void ento::registerCStringCheckerBasic(CheckerManager &Mgr) {
+  registerCStringNullArg(Mgr);
+}
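
As a rough, hypothetical illustration (not code from this commit), these are
the kinds of calls the CStringChecker above models; all names are made up:

    // hypothetical example, not from the Clang sources
    #include <string.h>

    void demo(char *dst, size_t dstsize, const char *src) {
      size_t n = strnlen(src, dstsize); // result constrained to be <= dstsize
      strncpy(dst, src, dstsize);       // bound compared with dst's extent when known
      if (strcmp(src, src) == 0) {      // identical buffers: result bound to 0
        dst[0] = '\0';
      }
      (void)n;
    }
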
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
new file mode 100644
index 0000000..92c0eef
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
@@ -0,0 +1,192 @@
+//== CStringSyntaxChecker.cpp - Syntax checks for C string APIs --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An AST checker that looks for common pitfalls when using C string APIs.
+//  - Identifies erroneous patterns in the last argument to strncat - the number
+//    of bytes to copy.
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TypeTraits.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class WalkAST: public StmtVisitor<WalkAST> {
+  BugReporter &BR;
+  AnalysisDeclContext* AC;
+
+  /// Check if two expressions refer to the same declaration.
+  inline bool sameDecl(const Expr *A1, const Expr *A2) {
+    if (const DeclRefExpr *D1 = dyn_cast<DeclRefExpr>(A1->IgnoreParenCasts()))
+      if (const DeclRefExpr *D2 = dyn_cast<DeclRefExpr>(A2->IgnoreParenCasts()))
+        return D1->getDecl() == D2->getDecl();
+    return false;
+  }
+
+  /// Check if the expression E is a sizeof(WithArg).
+  inline bool isSizeof(const Expr *E, const Expr *WithArg) {
+    if (const UnaryExprOrTypeTraitExpr *UE =
+    dyn_cast<UnaryExprOrTypeTraitExpr>(E))
+      if (UE->getKind() == UETT_SizeOf)
+        return sameDecl(UE->getArgumentExpr(), WithArg);
+    return false;
+  }
+
+  /// Check if the expression E is a strlen(WithArg).
+  inline bool isStrlen(const Expr *E, const Expr *WithArg) {
+    if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+      const FunctionDecl *FD = CE->getDirectCallee();
+      if (!FD)
+        return false;
+      return (CheckerContext::isCLibraryFunction(FD, "strlen") &&
+              sameDecl(CE->getArg(0), WithArg));
+    }
+    return false;
+  }
+
+  /// Check if the expression is an integer literal with value 1.
+  inline bool isOne(const Expr *E) {
+    if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
+      return (IL->getValue().isIntN(1));
+    return false;
+  }
+
+  inline StringRef getPrintableName(const Expr *E) {
+    if (const DeclRefExpr *D = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+      return D->getDecl()->getName();
+    return StringRef();
+  }
+
+  /// Identify erroneous patterns in the last argument to strncat - the number
+  /// of bytes to copy.
+  bool containsBadStrncatPattern(const CallExpr *CE);
+
+public:
+  WalkAST(BugReporter &br, AnalysisDeclContext* ac) :
+      BR(br), AC(ac) {
+  }
+
+  // Statement visitor methods.
+  void VisitChildren(Stmt *S);
+  void VisitStmt(Stmt *S) {
+    VisitChildren(S);
+  }
+  void VisitCallExpr(CallExpr *CE);
+};
+} // end anonymous namespace
+
+// The correct size argument should look like the following:
+//   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
+// We look for the following anti-patterns:
+//   - strncat(dst, src, sizeof(dst) - strlen(dst));
+//   - strncat(dst, src, sizeof(dst) - 1);
+//   - strncat(dst, src, sizeof(dst));
+bool WalkAST::containsBadStrncatPattern(const CallExpr *CE) {
+  if (CE->getNumArgs() != 3)
+    return false;
+  const Expr *DstArg = CE->getArg(0);
+  const Expr *SrcArg = CE->getArg(1);
+  const Expr *LenArg = CE->getArg(2);
+
+  // Identify common, but incorrect, size expressions that are used instead.
+  if (const BinaryOperator *BE =
+              dyn_cast<BinaryOperator>(LenArg->IgnoreParenCasts())) {
+    // - sizeof(dst) - strlen(dst)
+    if (BE->getOpcode() == BO_Sub) {
+      const Expr *L = BE->getLHS();
+      const Expr *R = BE->getRHS();
+      if (isSizeof(L, DstArg) && isStrlen(R, DstArg))
+        return true;
+
+      // - sizeof(dst) - 1
+      if (isSizeof(L, DstArg) && isOne(R->IgnoreParenCasts()))
+        return true;
+    }
+  }
+  // - sizeof(dst)
+  if (isSizeof(LenArg, DstArg))
+    return true;
+
+  // - sizeof(src)
+  if (isSizeof(LenArg, SrcArg))
+    return true;
+  return false;
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+  const FunctionDecl *FD = CE->getDirectCallee();
+  if (!FD)
+    return;
+
+  if (CheckerContext::isCLibraryFunction(FD, "strncat")) {
+    if (containsBadStrncatPattern(CE)) {
+      const Expr *DstArg = CE->getArg(0);
+      const Expr *LenArg = CE->getArg(2);
+      SourceRange R = LenArg->getSourceRange();
+      PathDiagnosticLocation Loc =
+        PathDiagnosticLocation::createBegin(LenArg, BR.getSourceManager(), AC);
+
+      StringRef DstName = getPrintableName(DstArg);
+
+      SmallString<256> S;
+      llvm::raw_svector_ostream os(S);
+      os << "Potential buffer overflow. ";
+      if (!DstName.empty()) {
+        os << "Replace with 'sizeof(" << DstName << ") "
+              "- strlen(" << DstName <<") - 1'";
+        os << " or u";
+      } else
+        os << "U";
+      os << "se a safer 'strlcat' API";
+
+      BR.EmitBasicReport(FD, "Anti-pattern in the argument", "C String API",
+                         os.str(), Loc, &R, 1);
+    }
+  }
+
+  // Recurse and check children.
+  VisitChildren(CE);
+}
+
+void WalkAST::VisitChildren(Stmt *S) {
+  for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I != E;
+      ++I)
+    if (Stmt *child = *I)
+      Visit(child);
+}
+
+namespace {
+class CStringSyntaxChecker: public Checker<check::ASTCodeBody> {
+public:
+
+  void checkASTCodeBody(const Decl *D, AnalysisManager& Mgr,
+      BugReporter &BR) const {
+    WalkAST walker(BR, Mgr.getAnalysisDeclContext(D));
+    walker.Visit(D->getBody());
+  }
+};
+}
+
+void ento::registerCStringSyntaxChecker(CheckerManager &mgr) {
+  mgr.registerChecker<CStringSyntaxChecker>();
+}
+
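
As a rough, hypothetical illustration (not code from this commit), these are
the strncat size arguments the syntax checker above distinguishes:

    // hypothetical example, not from the Clang sources
    #include <string.h>

    void demo(const char *src) {
      char dst[64] = "";
      strncat(dst, src, sizeof(dst) - strlen(dst) - 1); // the form the diagnostic suggests
      strncat(dst, src, sizeof(dst) - strlen(dst));     // flagged: no room for the terminator
      strncat(dst, src, sizeof(dst) - 1);               // flagged: ignores what is already in dst
      strncat(dst, src, sizeof(dst));                   // flagged: can overflow dst
    }
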
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
new file mode 100644
index 0000000..4965d22
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -0,0 +1,464 @@
+//===--- CallAndMessageChecker.cpp ------------------------------*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines CallAndMessageChecker, a builtin checker that checks for
+// various errors in call and Objective-C message expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CallAndMessageChecker
+  : public Checker< check::PreStmt<CallExpr>, check::PreObjCMessage,
+                    check::PreCall > {
+  mutable OwningPtr<BugType> BT_call_null;
+  mutable OwningPtr<BugType> BT_call_undef;
+  mutable OwningPtr<BugType> BT_cxx_call_null;
+  mutable OwningPtr<BugType> BT_cxx_call_undef;
+  mutable OwningPtr<BugType> BT_call_arg;
+  mutable OwningPtr<BugType> BT_msg_undef;
+  mutable OwningPtr<BugType> BT_objc_prop_undef;
+  mutable OwningPtr<BugType> BT_objc_subscript_undef;
+  mutable OwningPtr<BugType> BT_msg_arg;
+  mutable OwningPtr<BugType> BT_msg_ret;
+public:
+
+  void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+  void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+  void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+
+private:
+  static bool PreVisitProcessArg(CheckerContext &C, SVal V,
+                                 SourceRange argRange, const Expr *argEx,
+                                 bool IsFirstArgument, bool checkUninitFields,
+                                 const CallEvent &Call, OwningPtr<BugType> &BT);
+
+  static void emitBadCall(BugType *BT, CheckerContext &C, const Expr *BadE);
+  void emitNilReceiverBug(CheckerContext &C, const ObjCMethodCall &msg,
+                          ExplodedNode *N) const;
+
+  void HandleNilReceiver(CheckerContext &C,
+                         ProgramStateRef state,
+                         const ObjCMethodCall &msg) const;
+
+  static void LazyInit_BT(const char *desc, OwningPtr<BugType> &BT) {
+    if (!BT)
+      BT.reset(new BuiltinBug(desc));
+  }
+};
+} // end anonymous namespace
+
+void CallAndMessageChecker::emitBadCall(BugType *BT, CheckerContext &C,
+                                        const Expr *BadE) {
+  ExplodedNode *N = C.generateSink();
+  if (!N)
+    return;
+
+  BugReport *R = new BugReport(*BT, BT->getName(), N);
+  if (BadE) {
+    R->addRange(BadE->getSourceRange());
+    if (BadE->isGLValue())
+      BadE = bugreporter::getDerefExpr(BadE);
+    bugreporter::trackNullOrUndefValue(N, BadE, *R);
+  }
+  C.emitReport(R);
+}
+
+static StringRef describeUninitializedArgumentInCall(const CallEvent &Call,
+                                                     bool IsFirstArgument) {
+  switch (Call.getKind()) {
+  case CE_ObjCMessage: {
+    const ObjCMethodCall &Msg = cast<ObjCMethodCall>(Call);
+    switch (Msg.getMessageKind()) {
+    case OCM_Message:
+      return "Argument in message expression is an uninitialized value";
+    case OCM_PropertyAccess:
+      assert(Msg.isSetter() && "Getters have no args");
+      return "Argument for property setter is an uninitialized value";
+    case OCM_Subscript:
+      if (Msg.isSetter() && IsFirstArgument)
+        return "Argument for subscript setter is an uninitialized value";
+      return "Subscript index is an uninitialized value";
+    }
+    llvm_unreachable("Unknown message kind.");
+  }
+  case CE_Block:
+    return "Block call argument is an uninitialized value";
+  default:
+    return "Function call argument is an uninitialized value";
+  }
+}
+
+bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
+                                               SVal V, SourceRange argRange,
+                                               const Expr *argEx,
+                                               bool IsFirstArgument,
+                                               bool checkUninitFields,
+                                               const CallEvent &Call,
+                                               OwningPtr<BugType> &BT) {
+  if (V.isUndef()) {
+    if (ExplodedNode *N = C.generateSink()) {
+      LazyInit_BT("Uninitialized argument value", BT);
+
+      // Generate a report for this bug.
+      StringRef Desc = describeUninitializedArgumentInCall(Call,
+                                                           IsFirstArgument);
+      BugReport *R = new BugReport(*BT, Desc, N);
+      R->addRange(argRange);
+      if (argEx)
+        bugreporter::trackNullOrUndefValue(N, argEx, *R);
+      C.emitReport(R);
+    }
+    return true;
+  }
+
+  if (!checkUninitFields)
+    return false;
+
+  if (Optional<nonloc::LazyCompoundVal> LV =
+          V.getAs<nonloc::LazyCompoundVal>()) {
+
+    class FindUninitializedField {
+    public:
+      SmallVector<const FieldDecl *, 10> FieldChain;
+    private:
+      StoreManager &StoreMgr;
+      MemRegionManager &MrMgr;
+      Store store;
+    public:
+      FindUninitializedField(StoreManager &storeMgr,
+                             MemRegionManager &mrMgr, Store s)
+      : StoreMgr(storeMgr), MrMgr(mrMgr), store(s) {}
+
+      bool Find(const TypedValueRegion *R) {
+        QualType T = R->getValueType();
+        if (const RecordType *RT = T->getAsStructureType()) {
+          const RecordDecl *RD = RT->getDecl()->getDefinition();
+          assert(RD && "Referred record has no definition");
+          for (RecordDecl::field_iterator I =
+               RD->field_begin(), E = RD->field_end(); I!=E; ++I) {
+            const FieldRegion *FR = MrMgr.getFieldRegion(*I, R);
+            FieldChain.push_back(*I);
+            T = I->getType();
+            if (T->getAsStructureType()) {
+              if (Find(FR))
+                return true;
+            }
+            else {
+              const SVal &V = StoreMgr.getBinding(store, loc::MemRegionVal(FR));
+              if (V.isUndef())
+                return true;
+            }
+            FieldChain.pop_back();
+          }
+        }
+
+        return false;
+      }
+    };
+
+    const LazyCompoundValData *D = LV->getCVData();
+    FindUninitializedField F(C.getState()->getStateManager().getStoreManager(),
+                             C.getSValBuilder().getRegionManager(),
+                             D->getStore());
+
+    if (F.Find(D->getRegion())) {
+      if (ExplodedNode *N = C.generateSink()) {
+        LazyInit_BT("Uninitialized argument value", BT);
+        SmallString<512> Str;
+        llvm::raw_svector_ostream os(Str);
+        os << "Passed-by-value struct argument contains uninitialized data";
+
+        if (F.FieldChain.size() == 1)
+          os << " (e.g., field: '" << *F.FieldChain[0] << "')";
+        else {
+          os << " (e.g., via the field chain: '";
+          bool first = true;
+          for (SmallVectorImpl<const FieldDecl *>::iterator
+               DI = F.FieldChain.begin(), DE = F.FieldChain.end(); DI!=DE;++DI){
+            if (first)
+              first = false;
+            else
+              os << '.';
+            os << **DI;
+          }
+          os << "')";
+        }
+
+        // Generate a report for this bug.
+        BugReport *R = new BugReport(*BT, os.str(), N);
+        R->addRange(argRange);
+
+        // FIXME: enhance track back for uninitialized value for arbitrary
+        // memregions
+        C.emitReport(R);
+      }
+      return true;
+    }
+  }
+
+  return false;
+}
+
+void CallAndMessageChecker::checkPreStmt(const CallExpr *CE,
+                                         CheckerContext &C) const{
+
+  const Expr *Callee = CE->getCallee()->IgnoreParens();
+  ProgramStateRef State = C.getState();
+  const LocationContext *LCtx = C.getLocationContext();
+  SVal L = State->getSVal(Callee, LCtx);
+
+  if (L.isUndef()) {
+    if (!BT_call_undef)
+      BT_call_undef.reset(new BuiltinBug("Called function pointer is an "
+                                         "uninitialized pointer value"));
+    emitBadCall(BT_call_undef.get(), C, Callee);
+    return;
+  }
+
+  ProgramStateRef StNonNull, StNull;
+  llvm::tie(StNonNull, StNull) =
+      State->assume(L.castAs<DefinedOrUnknownSVal>());
+
+  if (StNull && !StNonNull) {
+    if (!BT_call_null)
+      BT_call_null.reset(
+        new BuiltinBug("Called function pointer is null (null dereference)"));
+    emitBadCall(BT_call_null.get(), C, Callee);
+  }
+
+  C.addTransition(StNonNull);
+}
+
+void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
+                                         CheckerContext &C) const {
+  ProgramStateRef State = C.getState();
+
+  // If this is a call to a C++ method, check if the callee is null or
+  // undefined.
+  if (const CXXInstanceCall *CC = dyn_cast<CXXInstanceCall>(&Call)) {
+    SVal V = CC->getCXXThisVal();
+    if (V.isUndef()) {
+      if (!BT_cxx_call_undef)
+        BT_cxx_call_undef.reset(new BuiltinBug("Called C++ object pointer is "
+                                               "uninitialized"));
+      emitBadCall(BT_cxx_call_undef.get(), C, CC->getCXXThisExpr());
+      return;
+    }
+
+    ProgramStateRef StNonNull, StNull;
+    llvm::tie(StNonNull, StNull) =
+        State->assume(V.castAs<DefinedOrUnknownSVal>());
+
+    if (StNull && !StNonNull) {
+      if (!BT_cxx_call_null)
+        BT_cxx_call_null.reset(new BuiltinBug("Called C++ object pointer "
+                                              "is null"));
+      emitBadCall(BT_cxx_call_null.get(), C, CC->getCXXThisExpr());
+      return;
+    }
+
+    State = StNonNull;
+  }
+
+  // Don't check for uninitialized field values in arguments if the
+  // callee has a body that is available and we have the chance to inline it.
+  // This is a hack, but it is a reasonable compromise between sometimes warning
+  // and sometimes not, depending on whether we decide to inline a function.
+  const Decl *D = Call.getDecl();
+  const bool checkUninitFields =
+    !(C.getAnalysisManager().shouldInlineCall() && (D && D->getBody()));
+
+  OwningPtr<BugType> *BT;
+  if (isa<ObjCMethodCall>(Call))
+    BT = &BT_msg_arg;
+  else
+    BT = &BT_call_arg;
+
+  for (unsigned i = 0, e = Call.getNumArgs(); i != e; ++i)
+    if (PreVisitProcessArg(C, Call.getArgSVal(i), Call.getArgSourceRange(i),
+                           Call.getArgExpr(i), /*IsFirstArgument=*/i == 0,
+                           checkUninitFields, Call, *BT))
+      return;
+
+  // If we make it here, record our assumptions about the callee.
+  C.addTransition(State);
+}
+
+void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
+                                                CheckerContext &C) const {
+  SVal recVal = msg.getReceiverSVal();
+  if (recVal.isUndef()) {
+    if (ExplodedNode *N = C.generateSink()) {
+      BugType *BT = 0;
+      switch (msg.getMessageKind()) {
+      case OCM_Message:
+        if (!BT_msg_undef)
+          BT_msg_undef.reset(new BuiltinBug("Receiver in message expression "
+                                            "is an uninitialized value"));
+        BT = BT_msg_undef.get();
+        break;
+      case OCM_PropertyAccess:
+        if (!BT_objc_prop_undef)
+          BT_objc_prop_undef.reset(new BuiltinBug("Property access on an "
+                                                  "uninitialized object "
+                                                  "pointer"));
+        BT = BT_objc_prop_undef.get();
+        break;
+      case OCM_Subscript:
+        if (!BT_objc_subscript_undef)
+          BT_objc_subscript_undef.reset(new BuiltinBug("Subscript access on an "
+                                                       "uninitialized object "
+                                                       "pointer"));
+        BT = BT_objc_subscript_undef.get();
+        break;
+      }
+      assert(BT && "Unknown message kind.");
+
+      BugReport *R = new BugReport(*BT, BT->getName(), N);
+      const ObjCMessageExpr *ME = msg.getOriginExpr();
+      R->addRange(ME->getReceiverRange());
+
+      // FIXME: getTrackNullOrUndefValueVisitor can't handle "super" yet.
+      if (const Expr *ReceiverE = ME->getInstanceReceiver())
+        bugreporter::trackNullOrUndefValue(N, ReceiverE, *R);
+      C.emitReport(R);
+    }
+    return;
+  } else {
+    // Bifurcate the state into nil and non-nil ones.
+    DefinedOrUnknownSVal receiverVal = recVal.castAs<DefinedOrUnknownSVal>();
+
+    ProgramStateRef state = C.getState();
+    ProgramStateRef notNilState, nilState;
+    llvm::tie(notNilState, nilState) = state->assume(receiverVal);
+
+    // Handle receiver must be nil.
+    if (nilState && !notNilState) {
+      HandleNilReceiver(C, state, msg);
+      return;
+    }
+  }
+}
+
+void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
+                                               const ObjCMethodCall &msg,
+                                               ExplodedNode *N) const {
+
+  if (!BT_msg_ret)
+    BT_msg_ret.reset(
+      new BuiltinBug("Receiver in message expression is 'nil'"));
+
+  const ObjCMessageExpr *ME = msg.getOriginExpr();
+
+  QualType ResTy = msg.getResultType();
+
+  SmallString<200> buf;
+  llvm::raw_svector_ostream os(buf);
+  os << "The receiver of message '" << ME->getSelector().getAsString()
+     << "' is nil";
+  if (ResTy->isReferenceType()) {
+    os << ", which results in forming a null reference";
+  } else {
+    os << " and returns a value of type '";
+    msg.getResultType().print(os, C.getLangOpts());
+    os << "' that will be garbage";
+  }
+
+  BugReport *report = new BugReport(*BT_msg_ret, os.str(), N);
+  report->addRange(ME->getReceiverRange());
+  // FIXME: This won't track "self" in messages to super.
+  if (const Expr *receiver = ME->getInstanceReceiver()) {
+    bugreporter::trackNullOrUndefValue(N, receiver, *report);
+  }
+  C.emitReport(report);
+}
+
+static bool supportsNilWithFloatRet(const llvm::Triple &triple) {
+  return (triple.getVendor() == llvm::Triple::Apple &&
+          (triple.getOS() == llvm::Triple::IOS ||
+           !triple.isMacOSXVersionLT(10,5)));
+}
+
+void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
+                                              ProgramStateRef state,
+                                              const ObjCMethodCall &Msg) const {
+  ASTContext &Ctx = C.getASTContext();
+  static SimpleProgramPointTag Tag("CallAndMessageChecker : NilReceiver");
+
+  // Check the return type of the message expression.  A message to nil will
+  // return different values depending on the return type and the architecture.
+  QualType RetTy = Msg.getResultType();
+  CanQualType CanRetTy = Ctx.getCanonicalType(RetTy);
+  const LocationContext *LCtx = C.getLocationContext();
+
+  if (CanRetTy->isStructureOrClassType()) {
+    // Structure returns are safe since the compiler zeroes them out.
+    SVal V = C.getSValBuilder().makeZeroVal(RetTy);
+    C.addTransition(state->BindExpr(Msg.getOriginExpr(), LCtx, V), &Tag);
+    return;
+  }
+
+  // Other cases: check if sizeof(return type) > sizeof(void*)
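+  // For example (illustrative), a message whose result type is a C++ reference
+  // would form a null reference when the receiver is nil, so it is reported
+  // below regardless of the target.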
+  if (CanRetTy != Ctx.VoidTy && C.getLocationContext()->getParentMap()
+                                  .isConsumedExpr(Msg.getOriginExpr())) {
+    // Compute: sizeof(void *) and sizeof(return type)
+    const uint64_t voidPtrSize = Ctx.getTypeSize(Ctx.VoidPtrTy);
+    const uint64_t returnTypeSize = Ctx.getTypeSize(CanRetTy);
+
+    if (CanRetTy.getTypePtr()->isReferenceType()||
+        (voidPtrSize < returnTypeSize &&
+         !(supportsNilWithFloatRet(Ctx.getTargetInfo().getTriple()) &&
+           (Ctx.FloatTy == CanRetTy ||
+            Ctx.DoubleTy == CanRetTy ||
+            Ctx.LongDoubleTy == CanRetTy ||
+            Ctx.LongLongTy == CanRetTy ||
+            Ctx.UnsignedLongLongTy == CanRetTy)))) {
+      if (ExplodedNode *N = C.generateSink(state, 0 , &Tag))
+        emitNilReceiverBug(C, Msg, N);
+      return;
+    }
+
+    // Handle the safe cases where the return value is 0 if the
+    // receiver is nil.
+    //
+    // FIXME: For now take the conservative approach that we only
+    // return null values if we *know* that the receiver is nil.
+    // This is because we can have surprises like:
+    //
+    //   ... = [[NSScreens screens] objectAtIndex:0];
+    //
+    // What can happen is that [... screens] could return nil, but
+    // it most likely isn't nil.  We should assume the semantics
+    // of this case unless we have *a lot* more knowledge.
+    //
+    SVal V = C.getSValBuilder().makeZeroVal(RetTy);
+    C.addTransition(state->BindExpr(Msg.getOriginExpr(), LCtx, V), &Tag);
+    return;
+  }
+
+  C.addTransition(state);
+}
+
+void ento::registerCallAndMessageChecker(CheckerManager &mgr) {
+  mgr.registerChecker<CallAndMessageChecker>();
+}
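
As a rough, hypothetical illustration (not code from this commit), these are
two of the C-level defects the checker above reports:

    // hypothetical example, not from the Clang sources
    void takes_int(int v);

    void demo(void) {
      void (*fp)(void);   // never initialized
      fp();               // "Called function pointer is an uninitialized pointer value"
      int x;              // never initialized
      takes_int(x);       // "Function call argument is an uninitialized value"
    }
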
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
new file mode 100644
index 0000000..5e6e105
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -0,0 +1,86 @@
+//=== CastSizeChecker.cpp ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// CastSizeChecker checks, when a malloc'ed symbolic region is cast to type T,
+// whether the size of the symbolic region is a multiple of the size of T.
+//
+//===----------------------------------------------------------------------===//
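+
+// Illustrative sketch (hypothetical code, not taken from these sources) of the
+// kind of cast this check is intended to flag; the struct name and allocation
+// size are made up for illustration:
+//
+//   #include <stdlib.h>
+//   struct Record { int a; int b; int c; };        // commonly 12 bytes
+//   void useRecord(void) {
+//     // 20 is not a multiple of sizeof(struct Record), so the checker is
+//     // expected to warn about casting a region with the wrong size.
+//     struct Record *r = (struct Record *) malloc(20);
+//     free(r);
+//   }
+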
+#include "ClangSACheckers.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CastSizeChecker : public Checker< check::PreStmt<CastExpr> > {
+  mutable OwningPtr<BuiltinBug> BT;
+public:
+  void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
+};
+}
+
+void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
+  const Expr *E = CE->getSubExpr();
+  ASTContext &Ctx = C.getASTContext();
+  QualType ToTy = Ctx.getCanonicalType(CE->getType());
+  const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
+
+  if (!ToPTy)
+    return;
+
+  QualType ToPointeeTy = ToPTy->getPointeeType();
+
+  // Only perform the check if 'ToPointeeTy' is a complete type.
+  if (ToPointeeTy->isIncompleteType())
+    return;
+
+  ProgramStateRef state = C.getState();
+  const MemRegion *R = state->getSVal(E, C.getLocationContext()).getAsRegion();
+  if (R == 0)
+    return;
+
+  const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R);
+  if (SR == 0)
+    return;
+
+  SValBuilder &svalBuilder = C.getSValBuilder();
+  SVal extent = SR->getExtent(svalBuilder);
+  const llvm::APSInt *extentInt = svalBuilder.getKnownValue(state, extent);
+  if (!extentInt)
+    return;
+
+  CharUnits regionSize = CharUnits::fromQuantity(extentInt->getSExtValue());
+  CharUnits typeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);
+
+  // Ignore void, and a few other un-sizeable types.
+  if (typeSize.isZero())
+    return;
+
+  if (regionSize % typeSize != 0) {
+    if (ExplodedNode *errorNode = C.generateSink()) {
+      if (!BT)
+        BT.reset(new BuiltinBug("Cast region with wrong size.",
+                            "Cast a region whose size is not a multiple of the"
+                            " destination type size."));
+      BugReport *R = new BugReport(*BT, BT->getDescription(),
+                                               errorNode);
+      R->addRange(CE->getSourceRange());
+      C.emitReport(R);
+    }
+  }
+}
+
+
+void ento::registerCastSizeChecker(CheckerManager &mgr) {
+  mgr.registerChecker<CastSizeChecker>();  
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
new file mode 100644
index 0000000..60348c7
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
@@ -0,0 +1,74 @@
+//=== CastToStructChecker.cpp - Cast to struct checker ---------*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CastToStructChecker, a builtin checker that checks for
+// casts from non-struct pointers to struct pointers.
+// This check corresponds to CWE-588.
+//
+//===----------------------------------------------------------------------===//
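+
+// A hypothetical example (not taken from these sources) of a cast this check
+// is intended to flag; the names are made up for illustration:
+//
+//   struct Header { int tag; int length; };
+//   int firstTag(char *buffer) {
+//     // Cast from a non-struct pointer type (char *) to a struct pointer
+//     // (CWE-588); a warning is expected here.
+//     struct Header *h = (struct Header *) buffer;
+//     return h->tag;
+//   }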
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CastToStructChecker : public Checker< check::PreStmt<CastExpr> > {
+  mutable OwningPtr<BuiltinBug> BT;
+
+public:
+  void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
+};
+}
+
+void CastToStructChecker::checkPreStmt(const CastExpr *CE,
+                                       CheckerContext &C) const {
+  const Expr *E = CE->getSubExpr();
+  ASTContext &Ctx = C.getASTContext();
+  QualType OrigTy = Ctx.getCanonicalType(E->getType());
+  QualType ToTy = Ctx.getCanonicalType(CE->getType());
+
+  const PointerType *OrigPTy = dyn_cast<PointerType>(OrigTy.getTypePtr());
+  const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
+
+  if (!ToPTy || !OrigPTy)
+    return;
+
+  QualType OrigPointeeTy = OrigPTy->getPointeeType();
+  QualType ToPointeeTy = ToPTy->getPointeeType();
+
+  if (!ToPointeeTy->isStructureOrClassType())
+    return;
+
+  // We allow cast from void*.
+  if (OrigPointeeTy->isVoidType())
+    return;
+
+  // Now the cast-to-type is struct pointer, the original type is not void*.
+  if (!OrigPointeeTy->isRecordType()) {
+    if (ExplodedNode *N = C.addTransition()) {
+      if (!BT)
+        BT.reset(new BuiltinBug("Cast from non-struct type to struct type",
+                            "Casting a non-structure type to a structure type "
+                            "and accessing a field can lead to memory access "
+                            "errors or data corruption."));
+      BugReport *R = new BugReport(*BT,BT->getDescription(), N);
+      R->addRange(CE->getSourceRange());
+      C.emitReport(R);
+    }
+  }
+}
+
+void ento::registerCastToStructChecker(CheckerManager &mgr) {
+  mgr.registerChecker<CastToStructChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
new file mode 100644
index 0000000..3f9b3cc
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -0,0 +1,292 @@
+//==- CheckObjCDealloc.cpp - Check ObjC -dealloc implementation --*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines a CheckObjCDealloc, a checker that
+//  analyzes an Objective-C class's implementation to determine if it
+//  correctly implements -dealloc.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool scan_dealloc(Stmt *S, Selector Dealloc) {
+
+  if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S))
+    if (ME->getSelector() == Dealloc) {
+      switch (ME->getReceiverKind()) {
+      case ObjCMessageExpr::Instance: return false;
+      case ObjCMessageExpr::SuperInstance: return true;
+      case ObjCMessageExpr::Class: break;
+      case ObjCMessageExpr::SuperClass: break;
+      }
+    }
+
+  // Recurse to children.
+
+  for (Stmt::child_iterator I = S->child_begin(), E= S->child_end(); I!=E; ++I)
+    if (*I && scan_dealloc(*I, Dealloc))
+      return true;
+
+  return false;
+}
+
+static bool scan_ivar_release(Stmt *S, ObjCIvarDecl *ID,
+                              const ObjCPropertyDecl *PD,
+                              Selector Release,
+                              IdentifierInfo* SelfII,
+                              ASTContext &Ctx) {
+
+  // [mMyIvar release]
+  if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S))
+    if (ME->getSelector() == Release)
+      if (ME->getInstanceReceiver())
+        if (Expr *Receiver = ME->getInstanceReceiver()->IgnoreParenCasts())
+          if (ObjCIvarRefExpr *E = dyn_cast<ObjCIvarRefExpr>(Receiver))
+            if (E->getDecl() == ID)
+              return true;
+
+  // [self setMyIvar:nil];
+  if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S))
+    if (ME->getInstanceReceiver())
+      if (Expr *Receiver = ME->getInstanceReceiver()->IgnoreParenCasts())
+        if (DeclRefExpr *E = dyn_cast<DeclRefExpr>(Receiver))
+          if (E->getDecl()->getIdentifier() == SelfII)
+            if (ME->getMethodDecl() == PD->getSetterMethodDecl() &&
+                ME->getNumArgs() == 1 &&
+                ME->getArg(0)->isNullPointerConstant(Ctx, 
+                                              Expr::NPC_ValueDependentIsNull))
+              return true;
+
+  // self.myIvar = nil;
+  if (BinaryOperator* BO = dyn_cast<BinaryOperator>(S))
+    if (BO->isAssignmentOp())
+      if (ObjCPropertyRefExpr *PRE =
+           dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParenCasts()))
+        if (PRE->isExplicitProperty() && PRE->getExplicitProperty() == PD)
+            if (BO->getRHS()->isNullPointerConstant(Ctx, 
+                                            Expr::NPC_ValueDependentIsNull)) {
+              // This is only a 'release' if the property kind is not
+              // 'assign'.
+              return PD->getSetterKind() != ObjCPropertyDecl::Assign;
+            }
+
+  // Recurse to children.
+  for (Stmt::child_iterator I = S->child_begin(), E= S->child_end(); I!=E; ++I)
+    if (*I && scan_ivar_release(*I, ID, PD, Release, SelfII, Ctx))
+      return true;
+
+  return false;
+}
+
+static void checkObjCDealloc(const ObjCImplementationDecl *D,
+                             const LangOptions& LOpts, BugReporter& BR) {
+
+  assert (LOpts.getGC() != LangOptions::GCOnly);
+
+  ASTContext &Ctx = BR.getContext();
+  const ObjCInterfaceDecl *ID = D->getClassInterface();
+
+  // Does the class contain any ivars that are pointers (or id<...>)?
+  // If not, skip the check entirely.
+  // NOTE: This is motivated by PR 2517:
+  //        http://llvm.org/bugs/show_bug.cgi?id=2517
+
+  bool containsPointerIvar = false;
+
+  for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(), E=ID->ivar_end();
+       I!=E; ++I) {
+
+    ObjCIvarDecl *ID = *I;
+    QualType T = ID->getType();
+
+    if (!T->isObjCObjectPointerType() ||
+        ID->getAttr<IBOutletAttr>() || // Skip IBOutlets.
+        ID->getAttr<IBOutletCollectionAttr>()) // Skip IBOutletCollections.
+      continue;
+
+    containsPointerIvar = true;
+    break;
+  }
+
+  if (!containsPointerIvar)
+    return;
+
+  // Determine if the class subclasses NSObject.
+  IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
+  IdentifierInfo* SenTestCaseII = &Ctx.Idents.get("SenTestCase");
+
+
+  for ( ; ID ; ID = ID->getSuperClass()) {
+    IdentifierInfo *II = ID->getIdentifier();
+
+    if (II == NSObjectII)
+      break;
+
+    // FIXME: For now, ignore classes that subclass SenTestCase, as these don't
+    // need to implement -dealloc.  They implement tear down in another way,
+    // which we should try and catch later.
+    //  http://llvm.org/bugs/show_bug.cgi?id=3187
+    if (II == SenTestCaseII)
+      return;
+  }
+
+  if (!ID)
+    return;
+
+  // Get the "dealloc" selector.
+  IdentifierInfo* II = &Ctx.Idents.get("dealloc");
+  Selector S = Ctx.Selectors.getSelector(0, &II);
+  ObjCMethodDecl *MD = 0;
+
+  // Scan the instance methods for "dealloc".
+  for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(),
+       E = D->instmeth_end(); I!=E; ++I) {
+
+    if ((*I)->getSelector() == S) {
+      MD = *I;
+      break;
+    }
+  }
+
+  PathDiagnosticLocation DLoc =
+    PathDiagnosticLocation::createBegin(D, BR.getSourceManager());
+
+  if (!MD) { // No dealloc found.
+
+    const char* name = LOpts.getGC() == LangOptions::NonGC
+                       ? "missing -dealloc"
+                       : "missing -dealloc (Hybrid MM, non-GC)";
+
+    std::string buf;
+    llvm::raw_string_ostream os(buf);
+    os << "Objective-C class '" << *D << "' lacks a 'dealloc' instance method";
+
+    BR.EmitBasicReport(D, name, categories::CoreFoundationObjectiveC,
+                       os.str(), DLoc);
+    return;
+  }
+
+  // dealloc found.  Scan for missing [super dealloc].
+  if (MD->getBody() && !scan_dealloc(MD->getBody(), S)) {
+
+    const char* name = LOpts.getGC() == LangOptions::NonGC
+                       ? "missing [super dealloc]"
+                       : "missing [super dealloc] (Hybrid MM, non-GC)";
+
+    std::string buf;
+    llvm::raw_string_ostream os(buf);
+    os << "The 'dealloc' instance method in Objective-C class '" << *D
+       << "' does not send a 'dealloc' message to its super class"
+           " (missing [super dealloc])";
+
+    BR.EmitBasicReport(MD, name, categories::CoreFoundationObjectiveC,
+                       os.str(), DLoc);
+    return;
+  }
+
+  // Get the "release" selector.
+  IdentifierInfo* RII = &Ctx.Idents.get("release");
+  Selector RS = Ctx.Selectors.getSelector(0, &RII);
+
+  // Get the "self" identifier
+  IdentifierInfo* SelfII = &Ctx.Idents.get("self");
+
+  // Scan for missing and extra releases of ivars used by implementations
+  // of synthesized properties
+  for (ObjCImplementationDecl::propimpl_iterator I = D->propimpl_begin(),
+       E = D->propimpl_end(); I!=E; ++I) {
+
+    // We can only check the synthesized properties
+    if (I->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
+      continue;
+
+    ObjCIvarDecl *ID = I->getPropertyIvarDecl();
+    if (!ID)
+      continue;
+
+    QualType T = ID->getType();
+    if (!T->isObjCObjectPointerType()) // Skip non-pointer ivars
+      continue;
+
+    const ObjCPropertyDecl *PD = I->getPropertyDecl();
+    if (!PD)
+      continue;
+
+    // ivars cannot be set via read-only properties, so we'll skip them
+    if (PD->isReadOnly())
+      continue;
+
+    // ivar must be released if and only if the kind of setter was not 'assign'
+    bool requiresRelease = PD->getSetterKind() != ObjCPropertyDecl::Assign;
+    if (scan_ivar_release(MD->getBody(), ID, PD, RS, SelfII, Ctx)
+       != requiresRelease) {
+      const char *name = 0;
+      std::string buf;
+      llvm::raw_string_ostream os(buf);
+
+      if (requiresRelease) {
+        name = LOpts.getGC() == LangOptions::NonGC
+               ? "missing ivar release (leak)"
+               : "missing ivar release (Hybrid MM, non-GC)";
+
+        os << "The '" << *ID
+           << "' instance variable was retained by a synthesized property but "
+              "wasn't released in 'dealloc'";
+      } else {
+        name = LOpts.getGC() == LangOptions::NonGC
+               ? "extra ivar release (use-after-release)"
+               : "extra ivar release (Hybrid MM, non-GC)";
+
+        os << "The '" << *ID
+           << "' instance variable was not retained by a synthesized property "
+              "but was released in 'dealloc'";
+      }
+
+      PathDiagnosticLocation SDLoc =
+        PathDiagnosticLocation::createBegin(*I, BR.getSourceManager());
+
+      BR.EmitBasicReport(MD, name, categories::CoreFoundationObjectiveC,
+                         os.str(), SDLoc);
+    }
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCDeallocChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCDeallocChecker : public Checker<
+                                      check::ASTDecl<ObjCImplementationDecl> > {
+public:
+  void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
+                    BugReporter &BR) const {
+    if (mgr.getLangOpts().getGC() == LangOptions::GCOnly)
+      return;
+    checkObjCDealloc(cast<ObjCImplementationDecl>(D), mgr.getLangOpts(), BR);
+  }
+};
+}
+
+void ento::registerObjCDeallocChecker(CheckerManager &mgr) {
+  mgr.registerChecker<ObjCDeallocChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
new file mode 100644
index 0000000..9cb1d2d
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
@@ -0,0 +1,145 @@
+//=- CheckObjCInstMethSignature.cpp - Check ObjC method signatures -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines a CheckObjCInstMethSignature, a flow-insensitive check
+//  that determines if a subclass incorrectly redefines the signature of a
+//  method declared in one of its Objective-C superclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Type.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool AreTypesCompatible(QualType Derived, QualType Ancestor,
+                               ASTContext &C) {
+
+  // Right now don't compare the compatibility of pointers.  That involves
+  // looking at subtyping relationships.  FIXME: Future patch.
+  if (Derived->isAnyPointerType() &&  Ancestor->isAnyPointerType())
+    return true;
+
+  return C.typesAreCompatible(Derived, Ancestor);
+}
+
+static void CompareReturnTypes(const ObjCMethodDecl *MethDerived,
+                               const ObjCMethodDecl *MethAncestor,
+                               BugReporter &BR, ASTContext &Ctx,
+                               const ObjCImplementationDecl *ID) {
+
+  QualType ResDerived  = MethDerived->getResultType();
+  QualType ResAncestor = MethAncestor->getResultType();
+
+  if (!AreTypesCompatible(ResDerived, ResAncestor, Ctx)) {
+    std::string sbuf;
+    llvm::raw_string_ostream os(sbuf);
+
+    os << "The Objective-C class '"
+       << *MethDerived->getClassInterface()
+       << "', which is derived from class '"
+       << *MethAncestor->getClassInterface()
+       << "', defines the instance method '"
+       << MethDerived->getSelector().getAsString()
+       << "' whose return type is '"
+       << ResDerived.getAsString()
+       << "'.  A method with the same name (same selector) is also defined in "
+          "class '"
+       << *MethAncestor->getClassInterface()
+       << "' and has a return type of '"
+       << ResAncestor.getAsString()
+       << "'.  These two types are incompatible, and may result in undefined "
+          "behavior for clients of these classes.";
+
+    PathDiagnosticLocation MethDLoc =
+      PathDiagnosticLocation::createBegin(MethDerived,
+                                          BR.getSourceManager());
+
+    BR.EmitBasicReport(MethDerived,
+                       "Incompatible instance method return type",
+                       categories::CoreFoundationObjectiveC,
+                       os.str(), MethDLoc);
+  }
+}
+
+static void CheckObjCInstMethSignature(const ObjCImplementationDecl *ID,
+                                       BugReporter& BR) {
+
+  const ObjCInterfaceDecl *D = ID->getClassInterface();
+  const ObjCInterfaceDecl *C = D->getSuperClass();
+
+  if (!C)
+    return;
+
+  ASTContext &Ctx = BR.getContext();
+
+  // Build a DenseMap of the methods for quick querying.
+  typedef llvm::DenseMap<Selector,ObjCMethodDecl*> MapTy;
+  MapTy IMeths;
+  unsigned NumMethods = 0;
+
+  for (ObjCImplementationDecl::instmeth_iterator I=ID->instmeth_begin(),
+       E=ID->instmeth_end(); I!=E; ++I) {
+
+    ObjCMethodDecl *M = *I;
+    IMeths[M->getSelector()] = M;
+    ++NumMethods;
+  }
+
+  // Now recurse the class hierarchy chain looking for methods with the
+  // same signatures.
+  while (C && NumMethods) {
+    for (ObjCInterfaceDecl::instmeth_iterator I=C->instmeth_begin(),
+         E=C->instmeth_end(); I!=E; ++I) {
+
+      ObjCMethodDecl *M = *I;
+      Selector S = M->getSelector();
+
+      MapTy::iterator MI = IMeths.find(S);
+
+      if (MI == IMeths.end() || MI->second == 0)
+        continue;
+
+      --NumMethods;
+      ObjCMethodDecl *MethDerived = MI->second;
+      MI->second = 0;
+
+      CompareReturnTypes(MethDerived, M, BR, Ctx, ID);
+    }
+
+    C = C->getSuperClass();
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCMethSigsChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCMethSigsChecker : public Checker<
+                                      check::ASTDecl<ObjCImplementationDecl> > {
+public:
+  void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
+                    BugReporter &BR) const {
+    CheckObjCInstMethSignature(D, BR);
+  }
+};
+}
+
+void ento::registerObjCMethSigsChecker(CheckerManager &mgr) {
+  mgr.registerChecker<ObjCMethSigsChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
new file mode 100644
index 0000000..63080ea
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -0,0 +1,781 @@
+//==- CheckSecuritySyntaxOnly.cpp - Basic security checks --------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines a set of flow-insensitive security checks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool isArc4RandomAvailable(const ASTContext &Ctx) {
+  const llvm::Triple &T = Ctx.getTargetInfo().getTriple();
+  return T.getVendor() == llvm::Triple::Apple ||
+         T.getOS() == llvm::Triple::FreeBSD ||
+         T.getOS() == llvm::Triple::NetBSD ||
+         T.getOS() == llvm::Triple::OpenBSD ||
+         T.getOS() == llvm::Triple::Bitrig ||
+         T.getOS() == llvm::Triple::DragonFly;
+}
+
+namespace {
+struct ChecksFilter {
+  DefaultBool check_gets;
+  DefaultBool check_getpw;
+  DefaultBool check_mktemp;
+  DefaultBool check_mkstemp;
+  DefaultBool check_strcpy;
+  DefaultBool check_rand;
+  DefaultBool check_vfork;
+  DefaultBool check_FloatLoopCounter;
+  DefaultBool check_UncheckedReturn;
+};
+  
+class WalkAST : public StmtVisitor<WalkAST> {
+  BugReporter &BR;
+  AnalysisDeclContext* AC;
+  enum { num_setids = 6 };
+  IdentifierInfo *II_setid[num_setids];
+
+  const bool CheckRand;
+  const ChecksFilter &filter;
+
+public:
+  WalkAST(BugReporter &br, AnalysisDeclContext* ac,
+          const ChecksFilter &f)
+  : BR(br), AC(ac), II_setid(),
+    CheckRand(isArc4RandomAvailable(BR.getContext())),
+    filter(f) {}
+
+  // Statement visitor methods.
+  void VisitCallExpr(CallExpr *CE);
+  void VisitForStmt(ForStmt *S);
+  void VisitCompoundStmt (CompoundStmt *S);
+  void VisitStmt(Stmt *S) { VisitChildren(S); }
+
+  void VisitChildren(Stmt *S);
+
+  // Helpers.
+  bool checkCall_strCommon(const CallExpr *CE, const FunctionDecl *FD);
+
+  typedef void (WalkAST::*FnCheck)(const CallExpr *,
+				   const FunctionDecl *);
+
+  // Checker-specific methods.
+  void checkLoopConditionForFloat(const ForStmt *FS);
+  void checkCall_gets(const CallExpr *CE, const FunctionDecl *FD);
+  void checkCall_getpw(const CallExpr *CE, const FunctionDecl *FD);
+  void checkCall_mktemp(const CallExpr *CE, const FunctionDecl *FD);
+  void checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD);
+  void checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD);
+  void checkCall_strcat(const CallExpr *CE, const FunctionDecl *FD);
+  void checkCall_rand(const CallExpr *CE, const FunctionDecl *FD);
+  void checkCall_random(const CallExpr *CE, const FunctionDecl *FD);
+  void checkCall_vfork(const CallExpr *CE, const FunctionDecl *FD);
+  void checkUncheckedReturnValue(CallExpr *CE);
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// AST walking.
+//===----------------------------------------------------------------------===//
+
+void WalkAST::VisitChildren(Stmt *S) {
+  for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+    if (Stmt *child = *I)
+      Visit(child);
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+  // Get the callee.  
+  const FunctionDecl *FD = CE->getDirectCallee();
+
+  if (!FD)
+    return;
+
+  // Get the name of the callee. If it's a builtin, strip off the prefix.
+  IdentifierInfo *II = FD->getIdentifier();
+  if (!II)   // if no identifier, not a simple C function
+    return;
+  StringRef Name = II->getName();
+  if (Name.startswith("__builtin_"))
+    Name = Name.substr(10);
+
+  // Set the evaluation function by switching on the callee name.
+  FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
+    .Case("gets", &WalkAST::checkCall_gets)
+    .Case("getpw", &WalkAST::checkCall_getpw)
+    .Case("mktemp", &WalkAST::checkCall_mktemp)
+    .Case("mkstemp", &WalkAST::checkCall_mkstemp)
+    .Case("mkdtemp", &WalkAST::checkCall_mkstemp)
+    .Case("mkstemps", &WalkAST::checkCall_mkstemp)
+    .Cases("strcpy", "__strcpy_chk", &WalkAST::checkCall_strcpy)
+    .Cases("strcat", "__strcat_chk", &WalkAST::checkCall_strcat)
+    .Case("drand48", &WalkAST::checkCall_rand)
+    .Case("erand48", &WalkAST::checkCall_rand)
+    .Case("jrand48", &WalkAST::checkCall_rand)
+    .Case("lrand48", &WalkAST::checkCall_rand)
+    .Case("mrand48", &WalkAST::checkCall_rand)
+    .Case("nrand48", &WalkAST::checkCall_rand)
+    .Case("lcong48", &WalkAST::checkCall_rand)
+    .Case("rand", &WalkAST::checkCall_rand)
+    .Case("rand_r", &WalkAST::checkCall_rand)
+    .Case("random", &WalkAST::checkCall_random)
+    .Case("vfork", &WalkAST::checkCall_vfork)
+    .Default(NULL);
+
+  // If the callee isn't a function we recognize, evalFunction stays null and
+  // the call is not of security concern.  Otherwise, check and evaluate the
+  // call.
+  if (evalFunction)
+    (this->*evalFunction)(CE, FD);
+
+  // Recurse and check children.
+  VisitChildren(CE);
+}
+
+void WalkAST::VisitCompoundStmt(CompoundStmt *S) {
+  for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+    if (Stmt *child = *I) {
+      if (CallExpr *CE = dyn_cast<CallExpr>(child))
+        checkUncheckedReturnValue(CE);
+      Visit(child);
+    }
+}
+
+void WalkAST::VisitForStmt(ForStmt *FS) {
+  checkLoopConditionForFloat(FS);
+
+  // Recurse and check children.
+  VisitChildren(FS);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: floating point variable used as loop counter.
+// Originally: <rdar://problem/6336718>
+// Implements: CERT security coding advisory FLP-30.
+//===----------------------------------------------------------------------===//
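+
+// A hypothetical example (not taken from these sources) of a loop this check
+// is intended to flag; the bounds are made up for illustration:
+//
+//   void countUp(void) {
+//     for (float f = 0.0f; f != 1.0f; f += 0.1f) {   // float loop counter
+//       /* ... */
+//     }
+//   }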
+
+static const DeclRefExpr*
+getIncrementedVar(const Expr *expr, const VarDecl *x, const VarDecl *y) {
+  expr = expr->IgnoreParenCasts();
+
+  if (const BinaryOperator *B = dyn_cast<BinaryOperator>(expr)) {
+    if (!(B->isAssignmentOp() || B->isCompoundAssignmentOp() ||
+          B->getOpcode() == BO_Comma))
+      return NULL;
+
+    if (const DeclRefExpr *lhs = getIncrementedVar(B->getLHS(), x, y))
+      return lhs;
+
+    if (const DeclRefExpr *rhs = getIncrementedVar(B->getRHS(), x, y))
+      return rhs;
+
+    return NULL;
+  }
+
+  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(expr)) {
+    const NamedDecl *ND = DR->getDecl();
+    return ND == x || ND == y ? DR : NULL;
+  }
+
+  if (const UnaryOperator *U = dyn_cast<UnaryOperator>(expr))
+    return U->isIncrementDecrementOp()
+      ? getIncrementedVar(U->getSubExpr(), x, y) : NULL;
+
+  return NULL;
+}
+
+/// checkLoopConditionForFloat - This check looks for 'for' statements that
+///  use a floating point variable as a loop counter.
+///  CERT: FLP30-C, FLP30-CPP.
+///
+void WalkAST::checkLoopConditionForFloat(const ForStmt *FS) {
+  if (!filter.check_FloatLoopCounter)
+    return;
+
+  // Does the loop have a condition?
+  const Expr *condition = FS->getCond();
+
+  if (!condition)
+    return;
+
+  // Does the loop have an increment?
+  const Expr *increment = FS->getInc();
+
+  if (!increment)
+    return;
+
+  // Strip away '()' and casts.
+  condition = condition->IgnoreParenCasts();
+  increment = increment->IgnoreParenCasts();
+
+  // Is the loop condition a comparison?
+  const BinaryOperator *B = dyn_cast<BinaryOperator>(condition);
+
+  if (!B)
+    return;
+
+  // Is this a comparison?
+  if (!(B->isRelationalOp() || B->isEqualityOp()))
+    return;
+
+  // Are we comparing variables?
+  const DeclRefExpr *drLHS =
+    dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParenLValueCasts());
+  const DeclRefExpr *drRHS =
+    dyn_cast<DeclRefExpr>(B->getRHS()->IgnoreParenLValueCasts());
+
+  // Does at least one of the variables have a floating point type?
+  drLHS = drLHS && drLHS->getType()->isRealFloatingType() ? drLHS : NULL;
+  drRHS = drRHS && drRHS->getType()->isRealFloatingType() ? drRHS : NULL;
+
+  if (!drLHS && !drRHS)
+    return;
+
+  const VarDecl *vdLHS = drLHS ? dyn_cast<VarDecl>(drLHS->getDecl()) : NULL;
+  const VarDecl *vdRHS = drRHS ? dyn_cast<VarDecl>(drRHS->getDecl()) : NULL;
+
+  if (!vdLHS && !vdRHS)
+    return;
+
+  // Does either variable appear in increment?
+  const DeclRefExpr *drInc = getIncrementedVar(increment, vdLHS, vdRHS);
+
+  if (!drInc)
+    return;
+
+  // Emit the error.  First figure out which DeclRefExpr in the condition
+  // referenced the compared variable.
+  assert(drInc->getDecl());
+  const DeclRefExpr *drCond = vdLHS == drInc->getDecl() ? drLHS : drRHS;
+
+  SmallVector<SourceRange, 2> ranges;
+  SmallString<256> sbuf;
+  llvm::raw_svector_ostream os(sbuf);
+
+  os << "Variable '" << drCond->getDecl()->getName()
+     << "' with floating point type '" << drCond->getType().getAsString()
+     << "' should not be used as a loop counter";
+
+  ranges.push_back(drCond->getSourceRange());
+  ranges.push_back(drInc->getSourceRange());
+
+  const char *bugType = "Floating point variable used as loop counter";
+
+  PathDiagnosticLocation FSLoc =
+    PathDiagnosticLocation::createBegin(FS, BR.getSourceManager(), AC);
+  BR.EmitBasicReport(AC->getDecl(),
+                     bugType, "Security", os.str(),
+                     FSLoc, ranges.data(), ranges.size());
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'gets' is insecure.
+// Originally: <rdar://problem/6335715>
+// Implements (part of): 300-BSI (buildsecurityin.us-cert.gov)
+// CWE-242: Use of Inherently Dangerous Function
+//===----------------------------------------------------------------------===//
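+
+// A hypothetical example (not taken from these sources) of a call this check
+// is intended to flag; the buffer size is made up for illustration:
+//
+//   #include <stdio.h>
+//   void readLine(void) {
+//     char buf[128];
+//     gets(buf);   // no bound on the input; a buffer overflow is possible
+//   }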
+
+void WalkAST::checkCall_gets(const CallExpr *CE, const FunctionDecl *FD) {
+  if (!filter.check_gets)
+    return;
+  
+  const FunctionProtoType *FPT
+    = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
+  if (!FPT)
+    return;
+
+  // Verify that the function takes a single argument.
+  if (FPT->getNumArgs() != 1)
+    return;
+
+  // Is the argument a 'char*'?
+  const PointerType *PT = dyn_cast<PointerType>(FPT->getArgType(0));
+  if (!PT)
+    return;
+
+  if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+    return;
+
+  // Issue a warning.
+  SourceRange R = CE->getCallee()->getSourceRange();
+  PathDiagnosticLocation CELoc =
+    PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+  BR.EmitBasicReport(AC->getDecl(),
+                     "Potential buffer overflow in call to 'gets'",
+                     "Security",
+                     "Call to function 'gets' is extremely insecure as it can "
+                     "always result in a buffer overflow",
+                     CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'getpw' is insecure.
+// CWE-477: Use of Obsolete Functions
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_getpw(const CallExpr *CE, const FunctionDecl *FD) {
+  if (!filter.check_getpw)
+    return;
+
+  const FunctionProtoType *FPT
+    = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
+  if (!FPT)
+    return;
+
+  // Verify that the function takes two arguments.
+  if (FPT->getNumArgs() != 2)
+    return;
+
+  // Verify the first argument type is integer.
+  if (!FPT->getArgType(0)->isIntegralOrUnscopedEnumerationType())
+    return;
+
+  // Verify the second argument type is char*.
+  const PointerType *PT = dyn_cast<PointerType>(FPT->getArgType(1));
+  if (!PT)
+    return;
+
+  if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+    return;
+
+  // Issue a warning.
+  SourceRange R = CE->getCallee()->getSourceRange();
+  PathDiagnosticLocation CELoc =
+    PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+  BR.EmitBasicReport(AC->getDecl(),
+                     "Potential buffer overflow in call to 'getpw'",
+                     "Security",
+                     "The getpw() function is dangerous as it may overflow the "
+                     "provided buffer. It is obsoleted by getpwuid().",
+                     CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'mktemp' is insecure.  It is obsoleted by mkstemp().
+// CWE-377: Insecure Temporary File
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_mktemp(const CallExpr *CE, const FunctionDecl *FD) {
+  if (!filter.check_mktemp) {
+    // Fall back to the security check of looking for enough 'X's in the
+    // format string, since that is a less severe warning.
+    checkCall_mkstemp(CE, FD);
+    return;
+  }
+
+  const FunctionProtoType *FPT
+    = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
+  if (!FPT)
+    return;
+
+  // Verify that the function takes a single argument.
+  if (FPT->getNumArgs() != 1)
+    return;
+
+  // Verify that the argument is a pointer type.
+  const PointerType *PT = dyn_cast<PointerType>(FPT->getArgType(0));
+  if (!PT)
+    return;
+
+  // Verify that the argument is a 'char*'.
+  if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+    return;
+
+  // Issue a warning.
+  SourceRange R = CE->getCallee()->getSourceRange();
+  PathDiagnosticLocation CELoc =
+    PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+  BR.EmitBasicReport(AC->getDecl(),
+                     "Potential insecure temporary file in call 'mktemp'",
+                     "Security",
+                     "Call to function 'mktemp' is insecure as it always "
+                     "creates or uses insecure temporary file.  Use 'mkstemp' "
+                     "instead",
+                     CELoc, &R, 1);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Check: The template argument of 'mktemp', 'mkstemp', 'mkstemps' and
+// 'mkdtemp' should contain at least 6 'X's.
+//===----------------------------------------------------------------------===//
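+
+// A hypothetical example (not taken from these sources) of a call this check
+// is intended to flag.  Note that the check only inspects string-literal
+// arguments; the template below is made up for illustration and is itself
+// deliberately bad code:
+//
+//   #include <stdlib.h>
+//   int makeTemp(void) {
+//     // Only 3 'X's in the template, fewer than the 6 required for a
+//     // reasonably secure temporary file name; a warning is expected here.
+//     return mkstemp("/tmp/exampleXXX");
+//   }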
+
+void WalkAST::checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD) {
+  if (!filter.check_mkstemp)
+    return;
+
+  StringRef Name = FD->getIdentifier()->getName();
+  std::pair<signed, signed> ArgSuffix =
+    llvm::StringSwitch<std::pair<signed, signed> >(Name)
+      .Case("mktemp", std::make_pair(0,-1))
+      .Case("mkstemp", std::make_pair(0,-1))
+      .Case("mkdtemp", std::make_pair(0,-1))
+      .Case("mkstemps", std::make_pair(0,1))
+      .Default(std::make_pair(-1, -1));
+  
+  assert(ArgSuffix.first >= 0 && "Unsupported function");
+
+  // Check if the number of arguments is consistent with our expectations.
+  unsigned numArgs = CE->getNumArgs();
+  if ((signed) numArgs <= ArgSuffix.first)
+    return;
+  
+  const StringLiteral *strArg =
+    dyn_cast<StringLiteral>(CE->getArg((unsigned)ArgSuffix.first)
+                              ->IgnoreParenImpCasts());
+  
+  // Currently we only handle string literals.  It is possible to do better,
+  // either by looking at references to const variables, or by doing real
+  // flow analysis.
+  if (!strArg || strArg->getCharByteWidth() != 1)
+    return;
+
+  // Count the number of X's, taking into account a possible cutoff suffix.
+  StringRef str = strArg->getString();
+  unsigned numX = 0;
+  unsigned n = str.size();
+
+  // Take into account the suffix.
+  unsigned suffix = 0;
+  if (ArgSuffix.second >= 0) {
+    const Expr *suffixEx = CE->getArg((unsigned)ArgSuffix.second);
+    llvm::APSInt Result;
+    if (!suffixEx->EvaluateAsInt(Result, BR.getContext()))
+      return;
+    // FIXME: Issue a warning.
+    if (Result.isNegative())
+      return;
+    suffix = (unsigned) Result.getZExtValue();
+    n = (n > suffix) ? n - suffix : 0;
+  }
+  
+  for (unsigned i = 0; i < n; ++i)
+    if (str[i] == 'X') ++numX;
+  
+  if (numX >= 6)
+    return;
+  
+  // Issue a warning.
+  SourceRange R = strArg->getSourceRange();
+  PathDiagnosticLocation CELoc =
+    PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+  SmallString<512> buf;
+  llvm::raw_svector_ostream out(buf);
+  out << "Call to '" << Name << "' should have at least 6 'X's in the"
+    " format string to be secure (" << numX << " 'X'";
+  if (numX != 1)
+    out << 's';
+  out << " seen";
+  if (suffix) {
+    out << ", " << suffix << " character";
+    if (suffix > 1)
+      out << 's';
+    out << " used as a suffix";
+  }
+  out << ')';
+  BR.EmitBasicReport(AC->getDecl(),
+                     "Insecure temporary file creation", "Security",
+                     out.str(), CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'strcpy' is insecure.
+//
+// CWE-119: Improper Restriction of Operations within 
+// the Bounds of a Memory Buffer 
+//===----------------------------------------------------------------------===//
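+
+// A hypothetical example (not taken from these sources) of a call this check
+// is intended to flag; the function and parameter names are made up for
+// illustration:
+//
+//   #include <string.h>
+//   void copyName(char *dst, const char *src) {
+//     strcpy(dst, src);   // copy is not bounded by the destination size
+//   }
+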
+void WalkAST::checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD) {
+  if (!filter.check_strcpy)
+    return;
+  
+  if (!checkCall_strCommon(CE, FD))
+    return;
+
+  // Issue a warning.
+  SourceRange R = CE->getCallee()->getSourceRange();
+  PathDiagnosticLocation CELoc =
+    PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+  BR.EmitBasicReport(AC->getDecl(),
+                     "Potential insecure memory buffer bounds restriction in "
+                     "call 'strcpy'",
+                     "Security",
+                     "Call to function 'strcpy' is insecure as it does not "
+                     "provide bounding of the memory buffer. Replace "
+                     "unbounded copy functions with analogous functions that "
+                     "support length arguments such as 'strlcpy'. CWE-119.",
+                     CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'strcat' is insecure.
+//
+// CWE-119: Improper Restriction of Operations within 
+// the Bounds of a Memory Buffer 
+//===----------------------------------------------------------------------===//
+void WalkAST::checkCall_strcat(const CallExpr *CE, const FunctionDecl *FD) {
+  if (!filter.check_strcpy)
+    return;
+
+  if (!checkCall_strCommon(CE, FD))
+    return;
+
+  // Issue a warning.
+  SourceRange R = CE->getCallee()->getSourceRange();
+  PathDiagnosticLocation CELoc =
+    PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+  BR.EmitBasicReport(AC->getDecl(),
+                     "Potential insecure memory buffer bounds restriction in "
+                     "call 'strcat'",
+                     "Security",
+                     "Call to function 'strcat' is insecure as it does not "
+                     "provide bounding of the memory buffer. Replace "
+                     "unbounded copy functions with analogous functions that "
+                     "support length arguments such as 'strlcat'. CWE-119.",
+                     CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Common check for str* functions with no bounds parameters.
+//===----------------------------------------------------------------------===//
+bool WalkAST::checkCall_strCommon(const CallExpr *CE, const FunctionDecl *FD) {
+  const FunctionProtoType *FPT
+    = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
+  if (!FPT)
+    return false;
+
+  // Verify the function takes two arguments, three in the _chk version.
+  int numArgs = FPT->getNumArgs();
+  if (numArgs != 2 && numArgs != 3)
+    return false;
+
+  // Verify the type for both arguments.
+  for (int i = 0; i < 2; i++) {
+    // Verify that the arguments are pointers.
+    const PointerType *PT = dyn_cast<PointerType>(FPT->getArgType(i));
+    if (!PT)
+      return false;
+
+    // Verify that the argument is a 'char*'.
+    if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+      return false;
+  }
+
+  return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Linear congruential random number generators should not be used.
+// Originally: <rdar://problem/63371000>
+// CWE-338: Use of cryptographically weak PRNG
+//===----------------------------------------------------------------------===//
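+
+// A hypothetical example (not taken from these sources) of a call this check
+// is intended to flag on targets where 'arc4random' is available; the
+// function name is made up for illustration:
+//
+//   #include <stdlib.h>
+//   int weakRandom(void) {
+//     return rand();   // linear congruential generator; easy to predict
+//   }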
+
+void WalkAST::checkCall_rand(const CallExpr *CE, const FunctionDecl *FD) {
+  if (!filter.check_rand || !CheckRand)
+    return;
+
+  const FunctionProtoType *FTP
+    = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
+  if (!FTP)
+    return;
+
+  if (FTP->getNumArgs() == 1) {
+    // Is the argument an 'unsigned short *'?
+    // (Actually any integer type is allowed.)
+    const PointerType *PT = dyn_cast<PointerType>(FTP->getArgType(0));
+    if (!PT)
+      return;
+
+    if (! PT->getPointeeType()->isIntegralOrUnscopedEnumerationType())
+      return;
+  }
+  else if (FTP->getNumArgs() != 0)
+    return;
+
+  // Issue a warning.
+  SmallString<256> buf1;
+  llvm::raw_svector_ostream os1(buf1);
+  os1 << '\'' << *FD << "' is a poor random number generator";
+
+  SmallString<256> buf2;
+  llvm::raw_svector_ostream os2(buf2);
+  os2 << "Function '" << *FD
+      << "' is obsolete because it implements a poor random number generator."
+      << "  Use 'arc4random' instead";
+
+  SourceRange R = CE->getCallee()->getSourceRange();
+  PathDiagnosticLocation CELoc =
+    PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+  BR.EmitBasicReport(AC->getDecl(), os1.str(), "Security", os2.str(),
+                     CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: 'random' should not be used
+// Originally: <rdar://problem/63371000>
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_random(const CallExpr *CE, const FunctionDecl *FD) {
+  if (!CheckRand || !filter.check_rand)
+    return;
+
+  const FunctionProtoType *FTP
+    = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
+  if (!FTP)
+    return;
+
+  // Verify that the function takes no argument.
+  if (FTP->getNumArgs() != 0)
+    return;
+
+  // Issue a warning.
+  SourceRange R = CE->getCallee()->getSourceRange();
+  PathDiagnosticLocation CELoc =
+    PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+  BR.EmitBasicReport(AC->getDecl(),
+                     "'random' is not a secure random number generator",
+                     "Security",
+                     "The 'random' function produces a sequence of values that "
+                     "an adversary may be able to predict.  Use 'arc4random' "
+                     "instead", CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: 'vfork' should not be used.
+// POS33-C: Do not use vfork().
+//===----------------------------------------------------------------------===//
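+
+// A hypothetical example (not taken from these sources) of a call this check
+// is intended to flag; the function name is made up for illustration:
+//
+//   #include <unistd.h>
+//   void spawnChild(void) {
+//     if (vfork() == 0)   // any use of vfork() is reported
+//       _exit(0);
+//   }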
+
+void WalkAST::checkCall_vfork(const CallExpr *CE, const FunctionDecl *FD) {
+  if (!filter.check_vfork)
+    return;
+
+  // All calls to vfork() are insecure, issue a warning.
+  SourceRange R = CE->getCallee()->getSourceRange();
+  PathDiagnosticLocation CELoc =
+    PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+  BR.EmitBasicReport(AC->getDecl(),
+                     "Potential insecure implementation-specific behavior in "
+                     "call 'vfork'",
+                     "Security",
+                     "Call to function 'vfork' is insecure as it can lead to "
+                     "denial of service situations in the parent process. "
+                     "Replace calls to vfork with calls to the safer "
+                     "'posix_spawn' function",
+                     CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Should check whether privileges are dropped successfully.
+// Originally: <rdar://problem/6337132>
+//===----------------------------------------------------------------------===//
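+
+// A hypothetical example (not taken from these sources) of a call this check
+// is intended to flag; the function name is made up for illustration:
+//
+//   #include <unistd.h>
+//   void dropPrivileges(uid_t uid) {
+//     setuid(uid);   // return value not checked; the call may have failed
+//   }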
+
+void WalkAST::checkUncheckedReturnValue(CallExpr *CE) {
+  if (!filter.check_UncheckedReturn)
+    return;
+  
+  const FunctionDecl *FD = CE->getDirectCallee();
+  if (!FD)
+    return;
+
+  if (II_setid[0] == NULL) {
+    static const char * const identifiers[num_setids] = {
+      "setuid", "setgid", "seteuid", "setegid",
+      "setreuid", "setregid"
+    };
+
+    for (size_t i = 0; i < num_setids; i++)
+      II_setid[i] = &BR.getContext().Idents.get(identifiers[i]);
+  }
+
+  const IdentifierInfo *id = FD->getIdentifier();
+  size_t identifierid;
+
+  for (identifierid = 0; identifierid < num_setids; identifierid++)
+    if (id == II_setid[identifierid])
+      break;
+
+  if (identifierid >= num_setids)
+    return;
+
+  const FunctionProtoType *FTP
+    = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
+  if (!FTP)
+    return;
+
+  // Verify that the function takes one or two arguments (depending on
+  //   the function).
+  if (FTP->getNumArgs() != (identifierid < 4 ? 1 : 2))
+    return;
+
+  // The arguments must be integers.
+  for (unsigned i = 0; i < FTP->getNumArgs(); i++)
+    if (! FTP->getArgType(i)->isIntegralOrUnscopedEnumerationType())
+      return;
+
+  // Issue a warning.
+  SmallString<256> buf1;
+  llvm::raw_svector_ostream os1(buf1);
+  os1 << "Return value is not checked in call to '" << *FD << '\'';
+
+  SmallString<256> buf2;
+  llvm::raw_svector_ostream os2(buf2);
+  os2 << "The return value from the call to '" << *FD
+      << "' is not checked.  If an error occurs in '" << *FD
+      << "', the following code may execute with unexpected privileges";
+
+  SourceRange R = CE->getCallee()->getSourceRange();
+  PathDiagnosticLocation CELoc =
+    PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+  BR.EmitBasicReport(AC->getDecl(), os1.str(), "Security", os2.str(),
+                     CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// SecuritySyntaxChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class SecuritySyntaxChecker : public Checker<check::ASTCodeBody> {
+public:
+  ChecksFilter filter;
+  
+  void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+                        BugReporter &BR) const {
+    WalkAST walker(BR, mgr.getAnalysisDeclContext(D), filter);
+    walker.Visit(D->getBody());
+  }
+};
+}
+
+#define REGISTER_CHECKER(name) \
+void ento::register##name(CheckerManager &mgr) {\
+  mgr.registerChecker<SecuritySyntaxChecker>()->filter.check_##name = true;\
+}
+
+REGISTER_CHECKER(gets)
+REGISTER_CHECKER(getpw)
+REGISTER_CHECKER(mkstemp)
+REGISTER_CHECKER(mktemp)
+REGISTER_CHECKER(strcpy)
+REGISTER_CHECKER(rand)
+REGISTER_CHECKER(vfork)
+REGISTER_CHECKER(FloatLoopCounter)
+REGISTER_CHECKER(UncheckedReturn)
+
+
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
new file mode 100644
index 0000000..f2c5050
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
@@ -0,0 +1,92 @@
+//==- CheckSizeofPointer.cpp - Check for sizeof on pointers ------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines a check for unintended use of sizeof() on pointer
+//  expressions.
+//
+//===----------------------------------------------------------------------===//
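+
+// A hypothetical example (not taken from these sources) of a use this check
+// is intended to flag (CWE-467); the names are made up for illustration:
+//
+//   #include <string.h>
+//   void clearBuffer(int *p) {
+//     // sizeof(p) is the size of the pointer itself, not of the data it
+//     // points to; a warning is expected here.
+//     memset(p, 0, sizeof(p));
+//   }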
+
+#include "ClangSACheckers.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class WalkAST : public StmtVisitor<WalkAST> {
+  BugReporter &BR;
+  AnalysisDeclContext* AC;
+
+public:
+  WalkAST(BugReporter &br, AnalysisDeclContext* ac) : BR(br), AC(ac) {}
+  void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
+  void VisitStmt(Stmt *S) { VisitChildren(S); }
+  void VisitChildren(Stmt *S);
+};
+}
+
+void WalkAST::VisitChildren(Stmt *S) {
+  for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+    if (Stmt *child = *I)
+      Visit(child);
+}
+
+// CWE-467: Use of sizeof() on a Pointer Type
+void WalkAST::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
+  if (E->getKind() != UETT_SizeOf)
+    return;
+
+  // If an explicit type is used in the code, usually the programmer knows
+  // what they are doing.
+  if (E->isArgumentType())
+    return;
+
+  QualType T = E->getTypeOfArgument();
+  if (T->isPointerType()) {
+
+    // Many false positives have the form 'sizeof *p'. This is reasonable 
+    // because people know what they are doing when they intentionally 
+    // dereference the pointer.
+    Expr *ArgEx = E->getArgumentExpr();
+    if (!isa<DeclRefExpr>(ArgEx->IgnoreParens()))
+      return;
+
+    SourceRange R = ArgEx->getSourceRange();
+    PathDiagnosticLocation ELoc =
+      PathDiagnosticLocation::createBegin(E, BR.getSourceManager(), AC);
+    BR.EmitBasicReport(AC->getDecl(),
+                       "Potential unintended use of sizeof() on pointer type",
+                       "Logic",
+                       "The code calls sizeof() on a pointer type. "
+                       "This can produce an unexpected result.",
+                       ELoc, &R, 1);
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// SizeofPointerChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class SizeofPointerChecker : public Checker<check::ASTCodeBody> {
+public:
+  void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+                        BugReporter &BR) const {
+    WalkAST walker(BR, mgr.getAnalysisDeclContext(D));
+    walker.Visit(D->getBody());
+  }
+};
+}
+
+void ento::registerSizeofPointerChecker(CheckerManager &mgr) {
+  mgr.registerChecker<SizeofPointerChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
new file mode 100644
index 0000000..a9dd19a
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -0,0 +1,312 @@
+//= CheckerDocumentation.cpp - Documentation checker ---------------*- C++ -*-//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker lists all the checker callbacks and provides documentation for
+// checker writers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+
+using namespace clang;
+using namespace ento;
+
+// All checkers should be placed into an anonymous namespace.
+// We place CheckerDocumentation inside the ento namespace to make it visible
+// in doxygen.
+namespace clang {
+namespace ento {
+
+/// This checker documents the callback functions checkers can use to implement
+/// the custom handling of the specific events during path exploration as well
+/// as reporting bugs. Most of the callbacks are targeted at path-sensitive
+/// checking.
+///
+/// \sa CheckerContext
+class CheckerDocumentation : public Checker< check::PreStmt<ReturnStmt>,
+                                       check::PostStmt<DeclStmt>,
+                                       check::PreObjCMessage,
+                                       check::PostObjCMessage,
+                                       check::PreCall,
+                                       check::PostCall,
+                                       check::BranchCondition,
+                                       check::Location,
+                                       check::Bind,
+                                       check::DeadSymbols,
+                                       check::EndFunction,
+                                       check::EndAnalysis,
+                                       check::EndOfTranslationUnit,
+                                       eval::Call,
+                                       eval::Assume,
+                                       check::LiveSymbols,
+                                       check::RegionChanges,
+                                       check::PointerEscape,
+                                       check::ConstPointerEscape,
+                                       check::Event<ImplicitNullDerefEvent>,
+                                       check::ASTDecl<FunctionDecl> > {
+public:
+
+  /// \brief Pre-visit the Statement.
+  ///
+  /// The method will be called before the analyzer core processes the
+  /// statement. The notification is performed for every explored CFGElement,
+  /// which does not include the control flow statements such as IfStmt. The
+  /// callback can be specialized to be called with any subclass of Stmt.
+  ///
+  /// See checkBranchCondition() callback for performing custom processing of
+  /// the branching statements.
+  ///
+  /// check::PreStmt<ReturnStmt>
+  void checkPreStmt(const ReturnStmt *DS, CheckerContext &C) const {}
+
+  /// \brief Post-visit the Statement.
+  ///
+  /// The method will be called after the analyzer core processes the
+  /// statement. The notification is performed for every explored CFGElement,
+  /// which does not include the control flow statements such as IfStmt. The
+  /// callback can be specialized to be called with any subclass of Stmt.
+  ///
+  /// check::PostStmt<DeclStmt>
+  void checkPostStmt(const DeclStmt *DS, CheckerContext &C) const;
+
+  /// \brief Pre-visit the Objective C message.
+  ///
+  /// This will be called before the analyzer core processes the method call.
+  /// This is called for any action which produces an Objective-C message send,
+  /// including explicit message syntax and property access.
+  ///
+  /// check::PreObjCMessage
+  void checkPreObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const {}
+
+  /// \brief Post-visit the Objective C message.
+  /// \sa checkPreObjCMessage()
+  ///
+  /// check::PostObjCMessage
+  void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const {}
+
+  /// \brief Pre-visit an abstract "call" event.
+  ///
+  /// This is used for checkers that want to check arguments or attributed
+  /// behavior for functions and methods no matter how they are being invoked.
+  ///
+  /// Note that this includes ALL cross-body invocations, so if you want to
+  /// limit your checks to, say, function calls, you should test for that at the
+  /// beginning of your callback function.
+  ///
+  /// check::PreCall
+  void checkPreCall(const CallEvent &Call, CheckerContext &C) const {}
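+
+  // For example, a hypothetical checker that only cares about plain C function
+  // calls (and not Objective-C messages, constructors, destructors, etc.)
+  // might start its checkPreCall along these lines:
+  //
+  //   void checkPreCall(const CallEvent &Call, CheckerContext &C) const {
+  //     const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
+  //     if (!FD || Call.getNumArgs() == 0)
+  //       return;                        // not a simple function call we model
+  //     SVal Arg = Call.getArgSVal(0);   // inspect the first argument, etc.
+  //   }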
+
+  /// \brief Post-visit an abstract "call" event.
+  /// \sa checkPreCall()
+  ///
+  /// check::PostCall
+  void checkPostCall(const CallEvent &Call, CheckerContext &C) const {}
+
+  /// \brief Pre-visit of the condition statement of a branch (such as IfStmt).
+  void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const {}
+
+  /// \brief Called on a load from and a store to a location.
+  ///
+  /// The method will be called each time a location (pointer) value is
+  /// accessed.
+  /// \param Loc    The value of the location (pointer).
+  /// \param IsLoad The flag specifying whether the access is a load or a store.
+  /// \param S      The statement being processed when the load or store occurs.
+  ///
+  /// check::Location
+  void checkLocation(SVal Loc, bool IsLoad, const Stmt *S,
+                     CheckerContext &) const {}
+
+  /// \brief Called on binding of a value to a location.
+  ///
+  /// \param Loc The value of the location (pointer).
+  /// \param Val The value which will be stored at the location Loc.
+  /// \param S   The statement being processed when the bind occurs.
+  ///
+  /// check::Bind
+  void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &) const {}
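+
+  // For example, a hypothetical checker could record regions that are known to
+  // hold a null pointer in a program-state set it registered itself (here
+  // called NulledRegions):
+  //
+  //   void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const {
+  //     const MemRegion *R = Loc.getAsRegion();
+  //     if (R && Val.isZeroConstant())
+  //       C.addTransition(C.getState()->add<NulledRegions>(R));
+  //   }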
+
+
+  /// \brief Called whenever a symbol becomes dead.
+  ///
+  /// This callback should be used by the checkers to aggressively clean
+  /// up/reduce the checker state, which is important for reducing the overall
+  /// memory usage. Specifically, if a checker keeps symbol-specific information
+  /// in the state, it can and should be dropped after the symbol becomes dead.
+  /// In addition, reporting a bug as soon as the symbol becomes dead leads to
+  /// more precise diagnostics. (For example, one should report that a malloced
+  /// variable is not freed right after it goes out of scope.)
+  ///
+  /// \param SR The SymbolReaper object can be queried to determine which
+  ///           symbols are dead.
+  ///
+  /// check::DeadSymbols
+  void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const {}
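+
+  // For example, a hypothetical checker that registered a symbol-keyed map
+  // with REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
+  // (with StreamMapTy a typedef for the underlying llvm::ImmutableMap) would
+  // typically prune dead entries like this:
+  //
+  //   void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const {
+  //     ProgramStateRef State = C.getState();
+  //     StreamMapTy Map = State->get<StreamMap>();
+  //     for (StreamMapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I)
+  //       if (SR.isDead(I->first))
+  //         State = State->remove<StreamMap>(I->first);
+  //     C.addTransition(State);
+  //   }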
+
+  /// \brief Called when the analyzer core reaches the end of a
+  /// function being analyzed.
+  ///
+  /// check::EndFunction
+  void checkEndFunction(CheckerContext &Ctx) const {}
+
+  /// \brief Called after all the paths in the ExplodedGraph reach the end of
+  /// path, i.e., when the symbolic execution graph is fully explored.
+  ///
+  /// This callback should be used in cases when a checker needs to have a
+  /// global view of the information generated on all paths. For example, to
+  /// compare the execution summaries/results across several paths.
+  /// See IdempotentOperationChecker for a usage example.
+  ///
+  /// check::EndAnalysis
+  void checkEndAnalysis(ExplodedGraph &G,
+                        BugReporter &BR,
+                        ExprEngine &Eng) const {}
+
+  /// \brief Called after analysis of a TranslationUnit is complete.
+  ///
+  /// check::EndOfTranslationUnit
+  void checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
+                                 AnalysisManager &Mgr,
+                                 BugReporter &BR) const {}
+
+
+  /// \brief Evaluates function call.
+  ///
+  /// The analysis core treats all function calls in the same way. However, some
+  /// functions have special meaning, which should be reflected in the program
+  /// state. This callback allows a checker to provide domain-specific knowledge
+  /// about the particular functions it knows about.
+  ///
+  /// \returns true if the call has been successfully evaluated
+  /// and false otherwise. Note that only one checker can evaluate a call. If
+  /// more than one checker claims that it can evaluate the same call, the
+  /// first one wins.
+  ///
+  /// eval::Call
+  bool evalCall(const CallExpr *CE, CheckerContext &C) const { return true; }
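+
+  // For example, a hypothetical checker could model a library function
+  // 'always_zero' (a made-up name) as returning the constant 0:
+  //
+  //   bool evalCall(const CallExpr *CE, CheckerContext &C) const {
+  //     const FunctionDecl *FD = C.getCalleeDecl(CE);
+  //     if (!FD || FD->getNameAsString() != "always_zero")
+  //       return false;                       // let the core evaluate the call
+  //     ProgramStateRef State = C.getState();
+  //     SVal Zero = C.getSValBuilder().makeIntVal(0, CE->getType());
+  //     State = State->BindExpr(CE, C.getLocationContext(), Zero);
+  //     C.addTransition(State);
+  //     return true;                          // the call is fully modeled here
+  //   }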
+
+  /// \brief Handles assumptions on symbolic values.
+  ///
+  /// This method is called when a symbolic expression is assumed to be true or
+  /// false. For example, the assumptions are performed when evaluating a
+  /// condition at a branch. The callback allows checkers to track the
+  /// assumptions performed on the symbols of interest and change the state
+  /// accordingly.
+  ///
+  /// eval::Assume
+  ProgramStateRef evalAssume(ProgramStateRef State,
+                                 SVal Cond,
+                                 bool Assumption) const { return State; }
+
+  /// Allows modifying SymbolReaper object. For example, checkers can explicitly
+  /// register symbols of interest as live. These symbols will not be marked
+  /// dead and removed.
+  ///
+  /// check::LiveSymbols
+  void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const {}
+
+  /// \brief Called to determine if the checker currently needs to know when
+  /// the contents of any regions change.
+  ///
+  /// Since it is not necessarily cheap to compute which regions are being
+  /// changed, this allows the analyzer core to skip the more expensive
+  /// #checkRegionChanges when no checkers are tracking any state.
+  bool wantsRegionChangeUpdate(ProgramStateRef St) const { return true; }
+  
+  /// \brief Called when the contents of one or more regions change.
+  ///
+  /// This can occur in many different ways: an explicit bind, a blanket
+  /// invalidation of the region contents, or by passing a region to a function
+  /// call whose behavior the analyzer cannot model perfectly.
+  ///
+  /// \param State The current program state.
+  /// \param Invalidated A set of all symbols potentially touched by the change.
+  /// \param ExplicitRegions The regions explicitly requested for invalidation.
+  ///        For a function call, this would be the arguments. For a bind, this
+  ///        would be the region being bound to.
+  /// \param Regions The transitive closure of regions accessible from
+  ///        \p ExplicitRegions, i.e. all regions that may have been touched
+  ///        by this change. For a simple bind, this list will be the same as
+  ///        \p ExplicitRegions, since a bind does not affect the contents of
+  ///        anything accessible through the base region.
+  /// \param Call The opaque call triggering this invalidation. Will be 0 if the
+  ///        change was not triggered by a call.
+  ///
+  /// Note that this callback will not be invoked unless
+  /// #wantsRegionChangeUpdate returns \c true.
+  ///
+  /// check::RegionChanges
+  ProgramStateRef 
+    checkRegionChanges(ProgramStateRef State,
+                       const InvalidatedSymbols *Invalidated,
+                       ArrayRef<const MemRegion *> ExplicitRegions,
+                       ArrayRef<const MemRegion *> Regions,
+                       const CallEvent *Call) const {
+    return State;
+  }
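+
+  // For example, a hypothetical checker keeping a region-keyed map (call it
+  // LengthMap) might conservatively drop the entry for every region that may
+  // have been touched:
+  //
+  //   ProgramStateRef checkRegionChanges(ProgramStateRef State,
+  //                                      const InvalidatedSymbols *,
+  //                                      ArrayRef<const MemRegion *>,
+  //                                      ArrayRef<const MemRegion *> Regions,
+  //                                      const CallEvent *) const {
+  //     for (ArrayRef<const MemRegion *>::iterator I = Regions.begin(),
+  //                                                E = Regions.end(); I != E; ++I)
+  //       State = State->remove<LengthMap>(*I);
+  //     return State;
+  //   }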
+
+  /// \brief Called when pointers escape.
+  ///
+  /// This notifies the checkers about pointer escape, which occurs whenever
+  /// the analyzer can no longer track the symbol. For example, this happens
+  /// when a pointer is assigned into a global or passed to a function call
+  /// that the analyzer cannot model.
+  /// 
+  /// \param State The state at the point of escape.
+  /// \param Escaped The list of escaped symbols.
+  /// \param Call The corresponding CallEvent, if the symbols escape as 
+  /// parameters to the given call.
+  /// \param Kind How the symbols have escaped.
+  /// \returns Checkers can modify the state by returning a new state.
+  ProgramStateRef checkPointerEscape(ProgramStateRef State,
+                                     const InvalidatedSymbols &Escaped,
+                                     const CallEvent *Call,
+                                     PointerEscapeKind Kind) const {
+    return State;
+  }
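+
+  // For example, a hypothetical checker tracking per-symbol state in a
+  // program-state map (say StreamMap) would usually stop tracking escaped
+  // symbols, since it can no longer reason about them:
+  //
+  //   ProgramStateRef checkPointerEscape(ProgramStateRef State,
+  //                                      const InvalidatedSymbols &Escaped,
+  //                                      const CallEvent *Call,
+  //                                      PointerEscapeKind Kind) const {
+  //     for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
+  //                                             E = Escaped.end(); I != E; ++I)
+  //       State = State->remove<StreamMap>(*I);
+  //     return State;
+  //   }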
+
+  /// \brief Called when const pointers escape.
+  ///
+  /// Note: in most cases checkPointerEscape callback is sufficient.
+  /// \sa checkPointerEscape
+  ProgramStateRef checkConstPointerEscape(ProgramStateRef State,
+                                     const InvalidatedSymbols &Escaped,
+                                     const CallEvent *Call,
+                                     PointerEscapeKind Kind) const {
+    return State;
+  }
+                                         
+  /// check::Event<ImplicitNullDerefEvent>
+  void checkEvent(ImplicitNullDerefEvent Event) const {}
+
+  /// \brief Check every declaration in the AST.
+  ///
+  /// An AST traversal callback, which should only be used when the checker is
+  /// not path sensitive. It will be called for every Declaration in the AST and
+  /// can be specialized to only be called on subclasses of Decl, for example,
+  /// FunctionDecl.
+  ///
+  /// check::ASTDecl<FunctionDecl>
+  void checkASTDecl(const FunctionDecl *D,
+                    AnalysisManager &Mgr,
+                    BugReporter &BR) const {}
+
+};
+
+void CheckerDocumentation::checkPostStmt(const DeclStmt *DS,
+                                         CheckerContext &C) const {
+  return;
+}
+
+} // end namespace ento
+} // end namespace clang
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
new file mode 100644
index 0000000..fc35b22
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
@@ -0,0 +1,541 @@
+//===--- Checkers.td - Static Analyzer Checkers ---------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+include "clang/StaticAnalyzer/Checkers/CheckerBase.td"
+
+//===----------------------------------------------------------------------===//
+// Packages.
+//===----------------------------------------------------------------------===//
+
+def Alpha : Package<"alpha">;
+
+def Core : Package<"core">;
+def CoreBuiltin : Package<"builtin">, InPackage<Core>;
+def CoreUninitialized  : Package<"uninitialized">, InPackage<Core>;
+def CoreAlpha : Package<"core">, InPackage<Alpha>, Hidden;
+
+def Cplusplus : Package<"cplusplus">;
+def CplusplusAlpha : Package<"cplusplus">, InPackage<Alpha>, Hidden;
+
+def DeadCode : Package<"deadcode">;
+def DeadCodeAlpha : Package<"deadcode">, InPackage<Alpha>, Hidden;
+
+def Security : Package <"security">;
+def InsecureAPI : Package<"insecureAPI">, InPackage<Security>;
+def SecurityAlpha : Package<"security">, InPackage<Alpha>, Hidden;
+def Taint : Package<"taint">, InPackage<SecurityAlpha>, Hidden;  
+
+def Unix : Package<"unix">;
+def UnixAlpha : Package<"unix">, InPackage<Alpha>, Hidden;
+def CString : Package<"cstring">, InPackage<Unix>, Hidden;
+def CStringAlpha : Package<"cstring">, InPackage<UnixAlpha>, Hidden;
+
+def OSX : Package<"osx">;
+def OSXAlpha : Package<"osx">, InPackage<Alpha>, Hidden;
+def Cocoa : Package<"cocoa">, InPackage<OSX>;
+def CocoaAlpha : Package<"cocoa">, InPackage<OSXAlpha>, Hidden;
+def CoreFoundation : Package<"coreFoundation">, InPackage<OSX>;
+def Containers : Package<"containers">, InPackage<CoreFoundation>;
+
+def LLVM : Package<"llvm">;
+def Debug : Package<"debug">;
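+
+// Note: a checker's full command-line name is formed by joining the names of
+// its enclosing packages and its own name with '.'.  For example, a
+// Checker<"Chroot"> whose parent package is UnixAlpha (Package<"unix"> inside
+// Alpha) is enabled as 'alpha.unix.Chroot'.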
+
+//===----------------------------------------------------------------------===//
+// Core Checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Core in {
+
+def DereferenceChecker : Checker<"NullDereference">,
+  HelpText<"Check for dereferences of null pointers">,
+  DescFile<"DereferenceChecker.cpp">;
+
+def CallAndMessageChecker : Checker<"CallAndMessage">,
+  HelpText<"Check for logical errors for function calls and Objective-C message expressions (e.g., uninitialized arguments, null function pointers)">,
+  DescFile<"CallAndMessageChecker.cpp">;
+
+def NonNullParamChecker : Checker<"NonNullParamChecker">,
+  HelpText<"Check for null pointers passed as arguments to a function whose arguments are references or marked with the 'nonnull' attribute">,
+  DescFile<"NonNullParamChecker.cpp">;
+
+def VLASizeChecker : Checker<"VLASize">,
+  HelpText<"Check for declarations of VLA of undefined or zero size">,
+  DescFile<"VLASizeChecker.cpp">;
+
+def DivZeroChecker : Checker<"DivideZero">,
+  HelpText<"Check for division by zero">,
+  DescFile<"DivZeroChecker.cpp">;
+
+def UndefResultChecker : Checker<"UndefinedBinaryOperatorResult">,
+  HelpText<"Check for undefined results of binary operators">,
+  DescFile<"UndefResultChecker.cpp">;
+
+def StackAddrEscapeChecker : Checker<"StackAddressEscape">,
+  HelpText<"Check that addresses to stack memory do not escape the function">,
+  DescFile<"StackAddrEscapeChecker.cpp">;
+
+def DynamicTypePropagation : Checker<"DynamicTypePropagation">,
+  HelpText<"Generate dynamic type information">,
+  DescFile<"DynamicTypePropagation.cpp">;
+
+} // end "core"
+
+let ParentPackage = CoreAlpha in {
+
+def BoolAssignmentChecker : Checker<"BoolAssignment">,
+  HelpText<"Warn about assigning non-{0,1} values to Boolean variables">,
+  DescFile<"BoolAssignmentChecker.cpp">;
+
+def CastSizeChecker : Checker<"CastSize">,
+  HelpText<"Check when casting a malloc'ed type T, whether the size is a multiple of the size of T">,
+  DescFile<"CastSizeChecker.cpp">;
+
+def CastToStructChecker : Checker<"CastToStruct">,
+  HelpText<"Check for cast from non-struct pointer to struct pointer">,
+  DescFile<"CastToStructChecker.cpp">;
+
+def FixedAddressChecker : Checker<"FixedAddr">,
+  HelpText<"Check for assignment of a fixed address to a pointer">,
+  DescFile<"FixedAddressChecker.cpp">;
+
+def PointerArithChecker : Checker<"PointerArithm">,
+  HelpText<"Check for pointer arithmetic on locations other than array elements">,
+  DescFile<"PointerArithChecker">;
+
+def PointerSubChecker : Checker<"PointerSub">,
+  HelpText<"Check for pointer subtractions on two pointers pointing to different memory chunks">,
+  DescFile<"PointerSubChecker">;
+
+def SizeofPointerChecker : Checker<"SizeofPtr">,
+  HelpText<"Warn about unintended use of sizeof() on pointer expressions">,
+  DescFile<"CheckSizeofPointer.cpp">;
+
+} // end "alpha.core"
+
+//===----------------------------------------------------------------------===//
+// Evaluate "builtin" functions.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = CoreBuiltin in {
+
+def NoReturnFunctionChecker : Checker<"NoReturnFunctions">,
+  HelpText<"Evaluate \"panic\" functions that are known to not return to the caller">,
+  DescFile<"NoReturnFunctionChecker.cpp">;
+
+def BuiltinFunctionChecker : Checker<"BuiltinFunctions">,
+  HelpText<"Evaluate compiler builtin functions (e.g., alloca())">,
+  DescFile<"BuiltinFunctionChecker.cpp">;
+
+} // end "core.builtin"
+
+//===----------------------------------------------------------------------===//
+// Uninitialized values checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = CoreUninitialized in {
+
+def UndefinedArraySubscriptChecker : Checker<"ArraySubscript">,
+  HelpText<"Check for uninitialized values used as array subscripts">,
+  DescFile<"UndefinedArraySubscriptChecker.cpp">;
+
+def UndefinedAssignmentChecker : Checker<"Assign">,
+  HelpText<"Check for assigning uninitialized values">,
+  DescFile<"UndefinedAssignmentChecker.cpp">;
+
+def UndefBranchChecker : Checker<"Branch">,
+  HelpText<"Check for uninitialized values used as branch conditions">,
+  DescFile<"UndefBranchChecker.cpp">;
+
+def UndefCapturedBlockVarChecker : Checker<"CapturedBlockVariable">,
+  HelpText<"Check for blocks that capture uninitialized values">,
+  DescFile<"UndefCapturedBlockVarChecker.cpp">;
+  
+def ReturnUndefChecker : Checker<"UndefReturn">,
+  HelpText<"Check for uninitialized values being returned to the caller">,
+  DescFile<"ReturnUndefChecker.cpp">;
+
+} // end "core.uninitialized"
+
+//===----------------------------------------------------------------------===//
+// C++ checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Cplusplus in {
+
+def NewDeleteChecker : Checker<"NewDelete">,
+  HelpText<"Check for double-free and use-after-free problems. Traces memory managed by new/delete.">, 
+  DescFile<"MallocChecker.cpp">;
+
+} // end: "cplusplus"
+
+let ParentPackage = CplusplusAlpha in {
+
+def VirtualCallChecker : Checker<"VirtualCall">,
+  HelpText<"Check virtual function calls during construction or destruction">, 
+  DescFile<"VirtualCallChecker.cpp">;
+
+def NewDeleteLeaksChecker : Checker<"NewDeleteLeaks">,
+  HelpText<"Check for memory leaks. Traces memory managed by new/delete.">, 
+  DescFile<"MallocChecker.cpp">;
+
+} // end: "alpha.cplusplus"
+
+//===----------------------------------------------------------------------===//
+// Deadcode checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = DeadCode in {
+
+def DeadStoresChecker : Checker<"DeadStores">,
+  HelpText<"Check for values stored to variables that are never read afterwards">,
+  DescFile<"DeadStoresChecker.cpp">;
+} // end DeadCode
+
+let ParentPackage = DeadCodeAlpha in {
+
+def IdempotentOperationChecker : Checker<"IdempotentOperations">,
+  HelpText<"Warn about idempotent operations">,
+  DescFile<"IdempotentOperationChecker.cpp">;
+
+def UnreachableCodeChecker : Checker<"UnreachableCode">,
+  HelpText<"Check unreachable code">,
+  DescFile<"UnreachableCodeChecker.cpp">;
+
+} // end "alpha.deadcode"
+
+//===----------------------------------------------------------------------===//
+// Security checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = InsecureAPI in {
+  def gets : Checker<"gets">,
+    HelpText<"Warn on uses of the 'gets' function">,
+    DescFile<"CheckSecuritySyntaxOnly.cpp">;
+  def getpw : Checker<"getpw">,
+    HelpText<"Warn on uses of the 'getpw' function">,
+    DescFile<"CheckSecuritySyntaxOnly.cpp">;
+  def mktemp : Checker<"mktemp">,
+    HelpText<"Warn on uses of the 'mktemp' function">,
+    DescFile<"CheckSecuritySyntaxOnly.cpp">;
+  def mkstemp : Checker<"mkstemp">,
+    HelpText<"Warn when 'mkstemp' is passed fewer than 6 X's in the format string">,
+    DescFile<"CheckSecuritySyntaxOnly.cpp">;
+  def rand : Checker<"rand">,
+    HelpText<"Warn on uses of the 'rand', 'random', and related functions">,
+    DescFile<"CheckSecuritySyntaxOnly.cpp">;
+  def strcpy : Checker<"strcpy">,
+    HelpText<"Warn on uses of the 'strcpy' and 'strcat' functions">,
+    DescFile<"CheckSecuritySyntaxOnly.cpp">;
+  def vfork : Checker<"vfork">,
+    HelpText<"Warn on uses of the 'vfork' function">,
+    DescFile<"CheckSecuritySyntaxOnly.cpp">;
+  def UncheckedReturn : Checker<"UncheckedReturn">,
+    HelpText<"Warn on uses of functions whose return values must be always checked">,
+    DescFile<"CheckSecuritySyntaxOnly.cpp">;
+}
+let ParentPackage = Security in {
+  def FloatLoopCounter : Checker<"FloatLoopCounter">,
+    HelpText<"Warn on using a floating point value as a loop counter (CERT: FLP30-C, FLP30-CPP)">,
+    DescFile<"CheckSecuritySyntaxOnly.cpp">;
+}
+
+let ParentPackage = SecurityAlpha in {
+
+def ArrayBoundChecker : Checker<"ArrayBound">,
+  HelpText<"Warn about buffer overflows (older checker)">,
+  DescFile<"ArrayBoundChecker.cpp">;  
+
+def ArrayBoundCheckerV2 : Checker<"ArrayBoundV2">,
+  HelpText<"Warn about buffer overflows (newer checker)">,
+  DescFile<"ArrayBoundCheckerV2.cpp">;
+
+def ReturnPointerRangeChecker : Checker<"ReturnPtrRange">,
+  HelpText<"Check for an out-of-bound pointer being returned to callers">,
+  DescFile<"ReturnPointerRangeChecker.cpp">;
+
+def MallocOverflowSecurityChecker : Checker<"MallocOverflow">,
+  HelpText<"Check for overflows in the arguments to malloc()">,
+  DescFile<"MallocOverflowSecurityChecker.cpp">;
+
+} // end "alpha.security"
+
+//===----------------------------------------------------------------------===//
+// Taint checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Taint in {
+
+def GenericTaintChecker : Checker<"TaintPropagation">,
+  HelpText<"Generate taint information used by other checkers">,
+  DescFile<"GenericTaintChecker.cpp">;
+
+} // end "alpha.security.taint"
+
+//===----------------------------------------------------------------------===//
+// Unix API checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Unix in {
+
+def UnixAPIChecker : Checker<"API">,
+  HelpText<"Check calls to various UNIX/Posix functions">,
+  DescFile<"UnixAPIChecker.cpp">;
+
+def MallocPessimistic : Checker<"Malloc">,
+  HelpText<"Check for memory leaks, double free, and use-after-free problems. Traces memory managed by malloc()/free().">,
+  DescFile<"MallocChecker.cpp">;
+  
+def MallocSizeofChecker : Checker<"MallocSizeof">,
+  HelpText<"Check for dubious malloc arguments involving sizeof">,
+  DescFile<"MallocSizeofChecker.cpp">;
+
+def MismatchedDeallocatorChecker : Checker<"MismatchedDeallocator">,
+  HelpText<"Check for mismatched deallocators.">,
+  DescFile<"MallocChecker.cpp">;
+  
+} // end "unix"
+
+let ParentPackage = UnixAlpha in {
+
+def ChrootChecker : Checker<"Chroot">,
+  HelpText<"Check improper use of chroot">,
+  DescFile<"ChrootChecker.cpp">;
+
+def MallocOptimistic : Checker<"MallocWithAnnotations">,
+  HelpText<"Check for memory leaks, double free, and use-after-free problems. Traces memory managed by malloc()/free(). Assumes that all user-defined functions which might free a pointer are annotated.">,
+  DescFile<"MallocChecker.cpp">;
+
+def PthreadLockChecker : Checker<"PthreadLock">,
+  HelpText<"Simple lock -> unlock checker">,
+  DescFile<"PthreadLockChecker.cpp">;
+
+def StreamChecker : Checker<"Stream">,
+  HelpText<"Check stream handling functions">,
+  DescFile<"StreamChecker.cpp">;
+
+def SimpleStreamChecker : Checker<"SimpleStream">,
+  HelpText<"Check for misuses of stream APIs">,
+  DescFile<"SimpleStreamChecker.cpp">;
+
+} // end "alpha.unix"
+
+let ParentPackage = CString in {
+
+def CStringNullArg : Checker<"NullArg">,
+  HelpText<"Check for null pointers being passed as arguments to C string functions">,
+  DescFile<"CStringChecker.cpp">;
+
+def CStringSyntaxChecker : Checker<"BadSizeArg">,
+  HelpText<"Check the size argument passed into C string functions for common erroneous patterns">,
+  DescFile<"CStringSyntaxChecker.cpp">;  
+}
+
+let ParentPackage = CStringAlpha in {
+
+def CStringOutOfBounds : Checker<"OutOfBounds">,
+  HelpText<"Check for out-of-bounds access in string functions">,
+  DescFile<"CStringChecker.cpp">;
+
+def CStringBufferOverlap : Checker<"BufferOverlap">,
+  HelpText<"Checks for overlap in two buffer arguments">,
+  DescFile<"CStringChecker.cpp">;
+
+def CStringNotNullTerm : Checker<"NotNullTerminated">,
+  HelpText<"Check for arguments which are not null-terminating strings">,
+  DescFile<"CStringChecker.cpp">;
+}
+
+//===----------------------------------------------------------------------===//
+// Mac OS X, Cocoa, and Core Foundation checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = OSX in {
+
+def MacOSXAPIChecker : Checker<"API">,
+  InPackage<OSX>,
+  HelpText<"Check for proper uses of various Apple APIs">,
+  DescFile<"MacOSXAPIChecker.cpp">;
+
+def MacOSKeychainAPIChecker : Checker<"SecKeychainAPI">,
+  InPackage<OSX>,
+  HelpText<"Check for proper uses of Secure Keychain APIs">,
+  DescFile<"MacOSKeychainAPIChecker.cpp">;
+
+} // end "osx"
+
+let ParentPackage = Cocoa in {
+
+def ObjCAtSyncChecker : Checker<"AtSync">,
+  HelpText<"Check for nil pointers used as mutexes for @synchronized">,
+  DescFile<"ObjCAtSyncChecker.cpp">;
+
+def NilArgChecker : Checker<"NilArg">,
+  HelpText<"Check for prohibited nil arguments to ObjC method calls">,
+  DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def ClassReleaseChecker : Checker<"ClassRelease">,
+  HelpText<"Check for sending 'retain', 'release', or 'autorelease' directly to a Class">,
+  DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def VariadicMethodTypeChecker : Checker<"VariadicMethodTypes">,
+  HelpText<"Check for passing non-Objective-C types to variadic collection "
+           "initialization methods that expect only Objective-C types">,
+  DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def NSAutoreleasePoolChecker : Checker<"NSAutoreleasePool">,
+  HelpText<"Warn for suboptimal uses of NSAutoreleasePool in Objective-C GC mode">,
+  DescFile<"NSAutoreleasePoolChecker.cpp">;
+
+def ObjCMethSigsChecker : Checker<"IncompatibleMethodTypes">,
+  HelpText<"Warn about Objective-C method signatures with type incompatibilities">,
+  DescFile<"CheckObjCInstMethSignature.cpp">;
+
+def ObjCUnusedIvarsChecker : Checker<"UnusedIvars">,
+  HelpText<"Warn about private ivars that are never used">,
+  DescFile<"ObjCUnusedIVarsChecker.cpp">;
+
+def ObjCSelfInitChecker : Checker<"SelfInit">,
+  HelpText<"Check that 'self' is properly initialized inside an initializer method">,
+  DescFile<"ObjCSelfInitChecker.cpp">;
+
+def ObjCLoopChecker : Checker<"Loops">,
+  HelpText<"Improved modeling of loops using Cocoa collection types">,
+  DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def ObjCNonNilReturnValueChecker : Checker<"NonNilReturnValue">,
+  HelpText<"Model the APIs that are guaranteed to return a non-nil value">,
+  DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def NSErrorChecker : Checker<"NSError">,
+  HelpText<"Check usage of NSError** parameters">,
+  DescFile<"NSErrorChecker.cpp">;
+
+def RetainCountChecker : Checker<"RetainCount">,
+  HelpText<"Check for leaks and improper reference count management">,
+  DescFile<"RetainCountChecker.cpp">;
+
+} // end "osx.cocoa"
+
+let ParentPackage = CocoaAlpha in {
+
+def ObjCDeallocChecker : Checker<"Dealloc">,
+  HelpText<"Warn about Objective-C classes that lack a correct implementation of -dealloc">,
+  DescFile<"CheckObjCDealloc.cpp">;
+
+def InstanceVariableInvalidation : Checker<"InstanceVariableInvalidation">,
+  HelpText<"Check that the invalidatable instance variables are invalidated in the methods annotated with objc_instance_variable_invalidator">,
+  DescFile<"IvarInvalidationChecker.cpp">;
+
+def MissingInvalidationMethod : Checker<"MissingInvalidationMethod">,
+  HelpText<"Check that the invalidation methods are present in classes that contain invalidatable instance variables">,
+  DescFile<"IvarInvalidationChecker.cpp">;
+
+def DirectIvarAssignment : Checker<"DirectIvarAssignment">,
+  HelpText<"Check for direct assignments to instance variables">,
+  DescFile<"DirectIvarAssignment.cpp">;
+
+def DirectIvarAssignmentForAnnotatedFunctions : Checker<"DirectIvarAssignmentForAnnotatedFunctions">,
+  HelpText<"Check for direct assignments to instance variables in the methods annotated with objc_no_direct_instance_variable_assignment">,
+  DescFile<"DirectIvarAssignment.cpp">;
+
+def ObjCSuperCallChecker : Checker<"MissingSuperCall">,
+  HelpText<"Warn about Objective-C methods that lack a necessary call to super">,
+  DescFile<"ObjCMissingSuperCallChecker.cpp">;
+
+} // end "alpha.osx.cocoa"
+
+let ParentPackage = CoreFoundation in {
+
+def CFNumberCreateChecker : Checker<"CFNumber">,
+  HelpText<"Check for proper uses of CFNumberCreate">,
+  DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def CFRetainReleaseChecker : Checker<"CFRetainRelease">,
+  HelpText<"Check for null arguments to CFRetain/CFRelease/CFMakeCollectable">,
+  DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def CFErrorChecker : Checker<"CFError">,
+  HelpText<"Check usage of CFErrorRef* parameters">,
+  DescFile<"NSErrorChecker.cpp">;
+}
+
+let ParentPackage = Containers in {
+def ObjCContainersASTChecker : Checker<"PointerSizedValues">,
+  HelpText<"Warns if 'CFArray', 'CFDictionary', 'CFSet' are created with non-pointer-size values">,
+  DescFile<"ObjCContainersASTChecker.cpp">;
+
+def ObjCContainersChecker : Checker<"OutOfBounds">,
+  HelpText<"Checks for index out-of-bounds when using 'CFArray' API">,
+  DescFile<"ObjCContainersChecker.cpp">;
+    
+}
+//===----------------------------------------------------------------------===//
+// Checkers for LLVM development.
+//===----------------------------------------------------------------------===//
+
+def LLVMConventionsChecker : Checker<"Conventions">,
+  InPackage<LLVM>,
+  HelpText<"Check code for LLVM codebase conventions">,
+  DescFile<"LLVMConventionsChecker.cpp">;
+
+//===----------------------------------------------------------------------===//
+// Debugging checkers (for analyzer development).
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Debug in {
+
+def DominatorsTreeDumper : Checker<"DumpDominators">,
+  HelpText<"Print the dominance tree for a given CFG">,
+  DescFile<"DebugCheckers.cpp">;
+
+def LiveVariablesDumper : Checker<"DumpLiveVars">,
+  HelpText<"Print results of live variable analysis">,
+  DescFile<"DebugCheckers.cpp">;
+
+def CFGViewer : Checker<"ViewCFG">,
+  HelpText<"View Control-Flow Graphs using GraphViz">,
+  DescFile<"DebugCheckers.cpp">;
+
+def CFGDumper : Checker<"DumpCFG">,
+  HelpText<"Display Control-Flow Graphs">,
+  DescFile<"DebugCheckers.cpp">;
+
+def CallGraphViewer : Checker<"ViewCallGraph">,
+  HelpText<"View Call Graph using GraphViz">,
+  DescFile<"DebugCheckers.cpp">;
+
+def CallGraphDumper : Checker<"DumpCallGraph">,
+  HelpText<"Display Call Graph">,
+  DescFile<"DebugCheckers.cpp">;
+
+def ConfigDumper : Checker<"ConfigDumper">,
+  HelpText<"Dump config table">,
+  DescFile<"DebugCheckers.cpp">;
+
+def TraversalDumper : Checker<"DumpTraversal">,
+  HelpText<"Print branch conditions as they are traversed by the engine">,
+  DescFile<"TraversalChecker.cpp">;
+
+def CallDumper : Checker<"DumpCalls">,
+  HelpText<"Print calls as they are traversed by the engine">,
+  DescFile<"TraversalChecker.cpp">;
+
+def AnalyzerStatsChecker : Checker<"Stats">,
+  HelpText<"Emit warnings with analyzer statistics">,
+  DescFile<"AnalyzerStatsChecker.cpp">;
+
+def TaintTesterChecker : Checker<"TaintTest">,
+  HelpText<"Mark tainted symbols as such.">,
+  DescFile<"TaintTesterChecker.cpp">;
+
+def ExprInspectionChecker : Checker<"ExprInspection">,
+  HelpText<"Check the analyzer's understanding of expressions">,
+  DescFile<"ExprInspectionChecker.cpp">;
+
+} // end "debug"
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
new file mode 100644
index 0000000..9912965
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -0,0 +1,158 @@
+//===- ChrootChecker.cpp -------- Basic security checks ----------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the chroot checker, which checks for improper use of chroot.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/ImmutableMap.h"
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+// enum value that represent the jail state
+enum Kind { NO_CHROOT, ROOT_CHANGED, JAIL_ENTERED };
+  
+bool isRootChanged(intptr_t k) { return k == ROOT_CHANGED; }
+//bool isJailEntered(intptr_t k) { return k == JAIL_ENTERED; }
+
+// This checker checks improper use of chroot.
+// The state transition:
+// NO_CHROOT ---chroot(path)--> ROOT_CHANGED ---chdir(/) --> JAIL_ENTERED
+//                                  |                               |
+//         ROOT_CHANGED<--chdir(..)--      JAIL_ENTERED<--chdir(..)--
+//                                  |                               |
+//                      bug<--foo()--          JAIL_ENTERED<--foo()--
+class ChrootChecker : public Checker<eval::Call, check::PreStmt<CallExpr> > {
+  mutable IdentifierInfo *II_chroot, *II_chdir;
+  // This bug refers to possibly breaking out of a chroot() jail.
+  mutable OwningPtr<BuiltinBug> BT_BreakJail;
+
+public:
+  ChrootChecker() : II_chroot(0), II_chdir(0) {}
+  
+  static void *getTag() {
+    static int x;
+    return &x;
+  }
+  
+  bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+  void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+private:
+  void Chroot(CheckerContext &C, const CallExpr *CE) const;
+  void Chdir(CheckerContext &C, const CallExpr *CE) const;
+};
+
+} // end anonymous namespace
+
+bool ChrootChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+  const FunctionDecl *FD = C.getCalleeDecl(CE);
+  if (!FD)
+    return false;
+
+  ASTContext &Ctx = C.getASTContext();
+  if (!II_chroot)
+    II_chroot = &Ctx.Idents.get("chroot");
+  if (!II_chdir)
+    II_chdir = &Ctx.Idents.get("chdir");
+
+  if (FD->getIdentifier() == II_chroot) {
+    Chroot(C, CE);
+    return true;
+  }
+  if (FD->getIdentifier() == II_chdir) {
+    Chdir(C, CE);
+    return true;
+  }
+
+  return false;
+}
+
+void ChrootChecker::Chroot(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = C.getState();
+  ProgramStateManager &Mgr = state->getStateManager();
+  
+  // Once we encounter a chroot() call, set the enum value ROOT_CHANGED
+  // directly in the GDM.
+  state = Mgr.addGDM(state, ChrootChecker::getTag(), (void*) ROOT_CHANGED);
+  C.addTransition(state);
+}
+
+void ChrootChecker::Chdir(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = C.getState();
+  ProgramStateManager &Mgr = state->getStateManager();
+
+  // If there is no jail state in the GDM, just return.
+  const void *k = state->FindGDM(ChrootChecker::getTag());
+  if (!k)
+    return;
+
+  // After chdir("/"), enter the jail, set the enum value JAIL_ENTERED.
+  const Expr *ArgExpr = CE->getArg(0);
+  SVal ArgVal = state->getSVal(ArgExpr, C.getLocationContext());
+  
+  if (const MemRegion *R = ArgVal.getAsRegion()) {
+    R = R->StripCasts();
+    if (const StringRegion* StrRegion= dyn_cast<StringRegion>(R)) {
+      const StringLiteral* Str = StrRegion->getStringLiteral();
+      if (Str->getString() == "/")
+        state = Mgr.addGDM(state, ChrootChecker::getTag(),
+                           (void*) JAIL_ENTERED);
+    }
+  }
+
+  C.addTransition(state);
+}
+
+// Check the jail state before any function call except chroot and chdir().
+void ChrootChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
+  const FunctionDecl *FD = C.getCalleeDecl(CE);
+  if (!FD)
+    return;
+
+  ASTContext &Ctx = C.getASTContext();
+  if (!II_chroot)
+    II_chroot = &Ctx.Idents.get("chroot");
+  if (!II_chdir)
+    II_chdir = &Ctx.Idents.get("chdir");
+
+  // Ignore chroot and chdir.
+  if (FD->getIdentifier() == II_chroot || FD->getIdentifier() == II_chdir)
+    return;
+  
+  // If jail state is ROOT_CHANGED, generate BugReport.
+  void *const* k = C.getState()->FindGDM(ChrootChecker::getTag());
+  if (k)
+    if (isRootChanged((intptr_t) *k))
+      if (ExplodedNode *N = C.addTransition()) {
+        if (!BT_BreakJail)
+          BT_BreakJail.reset(new BuiltinBug("Break out of jail",
+                                        "No call of chdir(\"/\") immediately "
+                                        "after chroot"));
+        BugReport *R = new BugReport(*BT_BreakJail, 
+                                     BT_BreakJail->getDescription(), N);
+        C.emitReport(R);
+      }
+  
+  return;
+}
+
+void ento::registerChrootChecker(CheckerManager &mgr) {
+  mgr.registerChecker<ChrootChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp
new file mode 100644
index 0000000..77a5a72
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp
@@ -0,0 +1,32 @@
+//===--- ClangCheckers.cpp - Provides builtin checkers ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/ClangCheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
+
+// FIXME: This is only necessary as long as there are checker registration
+// functions that do additional work besides mgr.registerChecker<CLASS>().
+// The only checkers that currently do this are:
+// - NSAutoreleasePoolChecker
+// - NSErrorChecker
+// - ObjCAtSyncChecker
+// It's probably worth including this information in Checkers.td to minimize
+// boilerplate code.
+#include "ClangSACheckers.h"
+
+using namespace clang;
+using namespace ento;
+
+void ento::registerBuiltinCheckers(CheckerRegistry &registry) {
+#define GET_CHECKERS
+#define CHECKER(FULLNAME,CLASS,DESCFILE,HELPTEXT,GROUPINDEX,HIDDEN)    \
+  registry.addChecker(register##CLASS, FULLNAME, HELPTEXT);
+#include "Checkers.inc"
+#undef GET_CHECKERS
+}
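+
+// For reference, each CHECKER(...) entry pulled in from the generated
+// Checkers.inc expands to a call of this form (shown here for the chroot
+// checker declared in Checkers.td):
+//
+//   registry.addChecker(registerChrootChecker, "alpha.unix.Chroot",
+//                       "Check improper use of chroot");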
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
new file mode 100644
index 0000000..bea908d
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
@@ -0,0 +1,37 @@
+//===--- ClangSACheckers.h - Registration functions for Checkers *- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declares the registration functions for the checkers defined in
+// libclangStaticAnalyzerCheckers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_LIB_CHECKERS_CLANGSACHECKERS_H
+#define LLVM_CLANG_SA_LIB_CHECKERS_CLANGSACHECKERS_H
+
+#include "clang/StaticAnalyzer/Checkers/CommonBugCategories.h"
+
+namespace clang {
+
+namespace ento {
+class CheckerManager;
+class CheckerRegistry;
+
+#define GET_CHECKERS
+#define CHECKER(FULLNAME,CLASS,CXXFILE,HELPTEXT,GROUPINDEX,HIDDEN)    \
+  void register##CLASS(CheckerManager &mgr);
+#include "Checkers.inc"
+#undef CHECKER
+#undef GET_CHECKERS
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CommonBugCategories.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CommonBugCategories.cpp
new file mode 100644
index 0000000..e2a8ea6
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/CommonBugCategories.cpp
@@ -0,0 +1,18 @@
+//=--- CommonBugCategories.cpp - Provides common issue categories -*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// Common strings used for the "category" of many static analyzer issues.
+namespace clang { namespace ento { namespace categories {
+
+const char *CoreFoundationObjectiveC = "Core Foundation/Objective-C";
+const char *MemoryCoreFoundationObjectiveC =
+  "Memory (Core Foundation/Objective-C)";
+const char *UnixAPI = "Unix API";
+}}}
+
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
new file mode 100644
index 0000000..f336a6e
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -0,0 +1,452 @@
+//==- DeadStoresChecker.cpp - Check for stores to dead variables -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines DeadStores, a flow-sensitive checker that looks for
+//  stores to variables that are no longer live.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/SaveAndRestore.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {  
+  
+/// A simple visitor to record what VarDecls occur in EH-handling code.
+class EHCodeVisitor : public RecursiveASTVisitor<EHCodeVisitor> {
+public:
+  bool inEH;
+  llvm::DenseSet<const VarDecl *> &S;
+  
+  bool TraverseObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+    SaveAndRestore<bool> inFinally(inEH, true);
+    return ::RecursiveASTVisitor<EHCodeVisitor>::TraverseObjCAtFinallyStmt(S);
+  }
+  
+  bool TraverseObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+    SaveAndRestore<bool> inCatch(inEH, true);
+    return ::RecursiveASTVisitor<EHCodeVisitor>::TraverseObjCAtCatchStmt(S);
+  }
+  
+  bool TraverseCXXCatchStmt(CXXCatchStmt *S) {
+    SaveAndRestore<bool> inCatch(inEH, true);
+    return TraverseStmt(S->getHandlerBlock());
+  }
+  
+  bool VisitDeclRefExpr(DeclRefExpr *DR) {
+    if (inEH)
+      if (const VarDecl *D = dyn_cast<VarDecl>(DR->getDecl()))
+        S.insert(D);
+    return true;
+  }
+  
+  EHCodeVisitor(llvm::DenseSet<const VarDecl *> &S) :
+  inEH(false), S(S) {}
+};
+
+// FIXME: Eventually migrate into its own file, and have it managed by
+// AnalysisManager.
+class ReachableCode {
+  const CFG &cfg;
+  llvm::BitVector reachable;
+public:
+  ReachableCode(const CFG &cfg)
+    : cfg(cfg), reachable(cfg.getNumBlockIDs(), false) {}
+  
+  void computeReachableBlocks();
+  
+  bool isReachable(const CFGBlock *block) const {
+    return reachable[block->getBlockID()];
+  }
+};
+}
+
+void ReachableCode::computeReachableBlocks() {
+  if (!cfg.getNumBlockIDs())
+    return;
+  
+  SmallVector<const CFGBlock*, 10> worklist;
+  worklist.push_back(&cfg.getEntry());
+  
+  while (!worklist.empty()) {
+    const CFGBlock *block = worklist.back();
+    worklist.pop_back();
+    llvm::BitVector::reference isReachable = reachable[block->getBlockID()];
+    if (isReachable)
+      continue;
+    isReachable = true;
+    for (CFGBlock::const_succ_iterator i = block->succ_begin(),
+                                       e = block->succ_end(); i != e; ++i)
+      if (const CFGBlock *succ = *i)
+        worklist.push_back(succ);
+  }
+}
+
+static const Expr *
+LookThroughTransitiveAssignmentsAndCommaOperators(const Expr *Ex) {
+  while (Ex) {
+    const BinaryOperator *BO =
+      dyn_cast<BinaryOperator>(Ex->IgnoreParenCasts());
+    if (!BO)
+      break;
+    if (BO->getOpcode() == BO_Assign) {
+      Ex = BO->getRHS();
+      continue;
+    }
+    if (BO->getOpcode() == BO_Comma) {
+      Ex = BO->getRHS();
+      continue;
+    }
+    break;
+  }
+  return Ex;
+}
+
+namespace {
+class DeadStoreObs : public LiveVariables::Observer {
+  const CFG &cfg;
+  ASTContext &Ctx;
+  BugReporter& BR;
+  AnalysisDeclContext* AC;
+  ParentMap& Parents;
+  llvm::SmallPtrSet<const VarDecl*, 20> Escaped;
+  OwningPtr<ReachableCode> reachableCode;
+  const CFGBlock *currentBlock;
+  OwningPtr<llvm::DenseSet<const VarDecl *> > InEH;
+
+  enum DeadStoreKind { Standard, Enclosing, DeadIncrement, DeadInit };
+
+public:
+  DeadStoreObs(const CFG &cfg, ASTContext &ctx,
+               BugReporter& br, AnalysisDeclContext* ac, ParentMap& parents,
+               llvm::SmallPtrSet<const VarDecl*, 20> &escaped)
+    : cfg(cfg), Ctx(ctx), BR(br), AC(ac), Parents(parents),
+      Escaped(escaped), currentBlock(0) {}
+
+  virtual ~DeadStoreObs() {}
+
+  bool isLive(const LiveVariables::LivenessValues &Live, const VarDecl *D) {
+    if (Live.isLive(D))
+      return true;
+    // Lazily construct the set that records which VarDecls are in
+    // EH code.
+    if (!InEH.get()) {
+      InEH.reset(new llvm::DenseSet<const VarDecl *>());
+      EHCodeVisitor V(*InEH.get());
+      V.TraverseStmt(AC->getBody());
+    }
+    // Treat all VarDecls that occur in EH code as being "always live"
+    // when considering whether to suppress dead stores.  Frequently stores
+    // are followed by reads in EH code, but we don't have the ability
+    // to analyze that yet.
+    return InEH->count(D);
+  }
+  
+  void Report(const VarDecl *V, DeadStoreKind dsk,
+              PathDiagnosticLocation L, SourceRange R) {
+    if (Escaped.count(V))
+      return;
+    
+    // Compute reachable blocks within the CFG for trivial cases
+    // where a bogus dead store can be reported because the store itself is
+    // unreachable.
+    if (!reachableCode.get()) {
+      reachableCode.reset(new ReachableCode(cfg));
+      reachableCode->computeReachableBlocks();
+    }
+    
+    if (!reachableCode->isReachable(currentBlock))
+      return;
+
+    SmallString<64> buf;
+    llvm::raw_svector_ostream os(buf);
+    const char *BugType = 0;
+
+    switch (dsk) {
+      case DeadInit:
+        BugType = "Dead initialization";
+        os << "Value stored to '" << *V
+           << "' during its initialization is never read";
+        break;
+
+      case DeadIncrement:
+        BugType = "Dead increment";
+      case Standard:
+        if (!BugType) BugType = "Dead assignment";
+        os << "Value stored to '" << *V << "' is never read";
+        break;
+
+      case Enclosing:
+        // Don't report issues in this case, e.g.: "if (x = foo())",
+        // where 'x' is unused later.  We have yet to see a case where 
+        // this is a real bug.
+        return;
+    }
+
+    BR.EmitBasicReport(AC->getDecl(), BugType, "Dead store", os.str(), L, R);
+  }
+
+  void CheckVarDecl(const VarDecl *VD, const Expr *Ex, const Expr *Val,
+                    DeadStoreKind dsk,
+                    const LiveVariables::LivenessValues &Live) {
+
+    if (!VD->hasLocalStorage())
+      return;
+    // Reference types confuse the dead stores checker.  Skip them
+    // for now.
+    if (VD->getType()->getAs<ReferenceType>())
+      return;
+
+    if (!isLive(Live, VD) &&
+        !(VD->getAttr<UnusedAttr>() || VD->getAttr<BlocksAttr>())) {
+
+      PathDiagnosticLocation ExLoc =
+        PathDiagnosticLocation::createBegin(Ex, BR.getSourceManager(), AC);
+      Report(VD, dsk, ExLoc, Val->getSourceRange());
+    }
+  }
+
+  void CheckDeclRef(const DeclRefExpr *DR, const Expr *Val, DeadStoreKind dsk,
+                    const LiveVariables::LivenessValues& Live) {
+    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl()))
+      CheckVarDecl(VD, DR, Val, dsk, Live);
+  }
+
+  bool isIncrement(VarDecl *VD, const BinaryOperator* B) {
+    if (B->isCompoundAssignmentOp())
+      return true;
+
+    const Expr *RHS = B->getRHS()->IgnoreParenCasts();
+    const BinaryOperator* BRHS = dyn_cast<BinaryOperator>(RHS);
+
+    if (!BRHS)
+      return false;
+
+    const DeclRefExpr *DR;
+
+    if ((DR = dyn_cast<DeclRefExpr>(BRHS->getLHS()->IgnoreParenCasts())))
+      if (DR->getDecl() == VD)
+        return true;
+
+    if ((DR = dyn_cast<DeclRefExpr>(BRHS->getRHS()->IgnoreParenCasts())))
+      if (DR->getDecl() == VD)
+        return true;
+
+    return false;
+  }
+
+  virtual void observeStmt(const Stmt *S, const CFGBlock *block,
+                           const LiveVariables::LivenessValues &Live) {
+
+    currentBlock = block;
+    
+    // Skip statements in macros.
+    if (S->getLocStart().isMacroID())
+      return;
+
+    // Only cover dead stores from regular assignments.  ++/-- dead stores
+    // have never flagged a real bug.
+    if (const BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
+      if (!B->isAssignmentOp()) return; // Skip non-assignments.
+
+      if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(B->getLHS()))
+        if (VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+          // Special case: check for assigning null to a pointer.
+          //  This is a common form of defensive programming.
+          const Expr *RHS =
+            LookThroughTransitiveAssignmentsAndCommaOperators(B->getRHS());
+          RHS = RHS->IgnoreParenCasts();
+          
+          QualType T = VD->getType();
+          if (T->isPointerType() || T->isObjCObjectPointerType()) {
+            if (RHS->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNull))
+              return;
+          }
+
+          // Special case: self-assignments.  These are often used to shut up
+          //  "unused variable" compiler warnings.
+          if (const DeclRefExpr *RhsDR = dyn_cast<DeclRefExpr>(RHS))
+            if (VD == dyn_cast<VarDecl>(RhsDR->getDecl()))
+              return;
+
+          // Otherwise, issue a warning.
+          DeadStoreKind dsk = Parents.isConsumedExpr(B)
+                              ? Enclosing
+                              : (isIncrement(VD,B) ? DeadIncrement : Standard);
+
+          CheckVarDecl(VD, DR, B->getRHS(), dsk, Live);
+        }
+    }
+    else if (const UnaryOperator* U = dyn_cast<UnaryOperator>(S)) {
+      if (!U->isIncrementOp() || U->isPrefix())
+        return;
+
+      const Stmt *parent = Parents.getParentIgnoreParenCasts(U);
+      if (!parent || !isa<ReturnStmt>(parent))
+        return;
+
+      const Expr *Ex = U->getSubExpr()->IgnoreParenCasts();
+
+      if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex))
+        CheckDeclRef(DR, U, DeadIncrement, Live);
+    }
+    else if (const DeclStmt *DS = dyn_cast<DeclStmt>(S))
+      // Iterate through the decls.  Warn if any initializers are complex
+      // expressions that are not live (never used).
+      for (DeclStmt::const_decl_iterator DI=DS->decl_begin(), DE=DS->decl_end();
+           DI != DE; ++DI) {
+
+        VarDecl *V = dyn_cast<VarDecl>(*DI);
+
+        if (!V)
+          continue;
+          
+        if (V->hasLocalStorage()) {          
+          // Reference types confuse the dead stores checker.  Skip them
+          // for now.
+          if (V->getType()->getAs<ReferenceType>())
+            return;
+            
+          if (const Expr *E = V->getInit()) {
+            while (const ExprWithCleanups *exprClean =
+                    dyn_cast<ExprWithCleanups>(E))
+              E = exprClean->getSubExpr();
+            
+            // Look through transitive assignments, e.g.:
+            // int x = y = 0;
+            E = LookThroughTransitiveAssignmentsAndCommaOperators(E);
+            
+            // Don't warn on C++ objects (yet) until we can show that their
+            // constructors/destructors don't have side effects.
+            if (isa<CXXConstructExpr>(E))
+              return;
+            
+            // A dead initialization is a variable that is dead after it
+            // is initialized.  We don't flag warnings for those variables
+            // marked 'unused'.
+            if (!isLive(Live, V) && V->getAttr<UnusedAttr>() == 0) {
+              // Special case: check for initializations with constants.
+              //
+              //  e.g. : int x = 0;
+              //
+              // If x is EVER assigned a new value later, don't issue
+              // a warning.  This is because such initialization can be
+              // due to defensive programming.
+              if (E->isEvaluatable(Ctx))
+                return;
+
+              if (const DeclRefExpr *DRE =
+                  dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+                if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+                  // Special case: check for initialization from constant
+                  //  variables.
+                  //
+                  //  e.g. extern const int MyConstant;
+                  //       int x = MyConstant;
+                  //
+                  if (VD->hasGlobalStorage() &&
+                      VD->getType().isConstQualified())
+                    return;
+                  // Special case: check for initialization from scalar
+                  //  parameters.  This is often a form of defensive
+                  //  programming.  Non-scalars are still an error because
+                  //  they more likely represent an actual algorithmic
+                  //  bug.
+                  if (isa<ParmVarDecl>(VD) && VD->getType()->isScalarType())
+                    return;
+                }
+
+              PathDiagnosticLocation Loc =
+                PathDiagnosticLocation::create(V, BR.getSourceManager());
+              Report(V, DeadInit, Loc, E->getSourceRange());
+            }
+          }
+        }
+      }
+  }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Driver function to invoke the Dead-Stores checker on a CFG.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class FindEscaped : public CFGRecStmtDeclVisitor<FindEscaped>{
+  CFG *cfg;
+public:
+  FindEscaped(CFG *c) : cfg(c) {}
+
+  CFG& getCFG() { return *cfg; }
+
+  llvm::SmallPtrSet<const VarDecl*, 20> Escaped;
+
+  void VisitUnaryOperator(UnaryOperator* U) {
+    // Check for '&'.  Any VarDecl whose address is taken
+    // we treat as escaped.
+    Expr *E = U->getSubExpr()->IgnoreParenCasts();
+    if (U->getOpcode() == UO_AddrOf)
+      if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E))
+        if (VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+          Escaped.insert(VD);
+          return;
+        }
+    Visit(E);
+  }
+};
+} // end anonymous namespace
+
+
+//===----------------------------------------------------------------------===//
+// DeadStoresChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class DeadStoresChecker : public Checker<check::ASTCodeBody> {
+public:
+  void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+                        BugReporter &BR) const {
+
+    // Don't do anything for template instantiations.
+    // Proving that code in a template instantiation is "dead"
+    // means proving that it is dead in all instantiations.
+    // This same problem exists with -Wunreachable-code.
+    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+      if (FD->isTemplateInstantiation())
+        return;
+
+    if (LiveVariables *L = mgr.getAnalysis<LiveVariables>(D)) {
+      CFG &cfg = *mgr.getCFG(D);
+      AnalysisDeclContext *AC = mgr.getAnalysisDeclContext(D);
+      ParentMap &pmap = mgr.getParentMap(D);
+      FindEscaped FS(&cfg);
+      FS.getCFG().VisitBlockStmts(FS);
+      DeadStoreObs A(cfg, BR.getContext(), BR, AC, pmap, FS.Escaped);
+      L->runOnAllBlocks(A);
+    }
+  }
+};
+}
+
+void ento::registerDeadStoresChecker(CheckerManager &mgr) {
+  mgr.registerChecker<DeadStoresChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
new file mode 100644
index 0000000..fe12866
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -0,0 +1,181 @@
+//==- DebugCheckers.cpp - Debugging Checkers ---------------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines checkers that display debugging information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/Analysis/Analyses/Dominators.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/CallGraph.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/Support/Process.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// DominatorsTreeDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class DominatorsTreeDumper : public Checker<check::ASTCodeBody> {
+public:
+  void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+                        BugReporter &BR) const {
+    if (AnalysisDeclContext *AC = mgr.getAnalysisDeclContext(D)) {
+      DominatorTree dom;
+      dom.buildDominatorTree(*AC);
+      dom.dump();
+    }
+  }
+};
+}
+
+void ento::registerDominatorsTreeDumper(CheckerManager &mgr) {
+  mgr.registerChecker<DominatorsTreeDumper>();
+}
+
+//===----------------------------------------------------------------------===//
+// LiveVariablesDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class LiveVariablesDumper : public Checker<check::ASTCodeBody> {
+public:
+  void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+                        BugReporter &BR) const {
+    if (LiveVariables* L = mgr.getAnalysis<LiveVariables>(D)) {
+      L->dumpBlockLiveness(mgr.getSourceManager());
+    }
+  }
+};
+}
+
+void ento::registerLiveVariablesDumper(CheckerManager &mgr) {
+  mgr.registerChecker<LiveVariablesDumper>();
+}
+
+//===----------------------------------------------------------------------===//
+// CFGViewer
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFGViewer : public Checker<check::ASTCodeBody> {
+public:
+  void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+                        BugReporter &BR) const {
+    if (CFG *cfg = mgr.getCFG(D)) {
+      cfg->viewCFG(mgr.getLangOpts());
+    }
+  }
+};
+}
+
+void ento::registerCFGViewer(CheckerManager &mgr) {
+  mgr.registerChecker<CFGViewer>();
+}
+
+//===----------------------------------------------------------------------===//
+// CFGDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFGDumper : public Checker<check::ASTCodeBody> {
+public:
+  void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+                        BugReporter &BR) const {
+    if (CFG *cfg = mgr.getCFG(D)) {
+      cfg->dump(mgr.getLangOpts(),
+                llvm::sys::Process::StandardErrHasColors());
+    }
+  }
+};
+}
+
+void ento::registerCFGDumper(CheckerManager &mgr) {
+  mgr.registerChecker<CFGDumper>();
+}
+
+//===----------------------------------------------------------------------===//
+// CallGraphViewer
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CallGraphViewer : public Checker< check::ASTDecl<TranslationUnitDecl> > {
+public:
+  void checkASTDecl(const TranslationUnitDecl *TU, AnalysisManager& mgr,
+                    BugReporter &BR) const {
+    CallGraph CG;
+    CG.addToCallGraph(const_cast<TranslationUnitDecl*>(TU));
+    CG.viewGraph();
+  }
+};
+}
+
+void ento::registerCallGraphViewer(CheckerManager &mgr) {
+  mgr.registerChecker<CallGraphViewer>();
+}
+
+//===----------------------------------------------------------------------===//
+// CallGraphDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CallGraphDumper : public Checker< check::ASTDecl<TranslationUnitDecl> > {
+public:
+  void checkASTDecl(const TranslationUnitDecl *TU, AnalysisManager& mgr,
+                    BugReporter &BR) const {
+    CallGraph CG;
+    CG.addToCallGraph(const_cast<TranslationUnitDecl*>(TU));
+    CG.dump();
+  }
+};
+}
+
+void ento::registerCallGraphDumper(CheckerManager &mgr) {
+  mgr.registerChecker<CallGraphDumper>();
+}
+
+
+//===----------------------------------------------------------------------===//
+// ConfigDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ConfigDumper : public Checker< check::EndOfTranslationUnit > {
+public:
+  void checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
+                                 AnalysisManager& mgr,
+                                 BugReporter &BR) const {
+
+    const AnalyzerOptions::ConfigTable &Config = mgr.options.Config;
+    AnalyzerOptions::ConfigTable::const_iterator I =
+      Config.begin(), E = Config.end();
+
+    std::vector<StringRef> Keys;
+    for (; I != E ; ++I) { Keys.push_back(I->getKey()); }
+    sort(Keys.begin(), Keys.end());
+    
+    llvm::errs() << "[config]\n";
+    for (unsigned i = 0, n = Keys.size(); i < n ; ++i) {
+      StringRef Key = Keys[i];
+      I = Config.find(Key);
+      llvm::errs() << Key << " = " << I->second << '\n';
+    }
+    llvm::errs() << "[stats]\n" << "num-entries = " << Keys.size() << '\n';
+  }
+};
+}
+
+void ento::registerConfigDumper(CheckerManager &mgr) {
+  mgr.registerChecker<ConfigDumper>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
new file mode 100644
index 0000000..72d46c5
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -0,0 +1,281 @@
+//== NullDerefChecker.cpp - Null dereference checker ------------*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines NullDerefChecker, a builtin check in ExprEngine that performs
+// checks for null pointers at loads and stores.
+//
+//===----------------------------------------------------------------------===//
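+//
+// Illustrative case (an editorial sketch, not part of the original file;
+// the function and variable names are made up):
+//
+//   int f(int *p) {
+//     if (p)
+//       return *p;
+//     return *p;  // warning: Dereference of null pointer
+//                 //          (loaded from variable 'p')
+//   }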
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class DereferenceChecker
+    : public Checker< check::Location,
+                      check::Bind,
+                      EventDispatcher<ImplicitNullDerefEvent> > {
+  mutable OwningPtr<BuiltinBug> BT_null;
+  mutable OwningPtr<BuiltinBug> BT_undef;
+
+  void reportBug(ProgramStateRef State, const Stmt *S, CheckerContext &C,
+                 bool IsBind = false) const;
+
+public:
+  void checkLocation(SVal location, bool isLoad, const Stmt* S,
+                     CheckerContext &C) const;
+  void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
+
+  static void AddDerefSource(raw_ostream &os,
+                             SmallVectorImpl<SourceRange> &Ranges,
+                             const Expr *Ex, const ProgramState *state,
+                             const LocationContext *LCtx,
+                             bool loadedFrom = false);
+};
+} // end anonymous namespace
+
+void
+DereferenceChecker::AddDerefSource(raw_ostream &os,
+                                   SmallVectorImpl<SourceRange> &Ranges,
+                                   const Expr *Ex,
+                                   const ProgramState *state,
+                                   const LocationContext *LCtx,
+                                   bool loadedFrom) {
+  Ex = Ex->IgnoreParenLValueCasts();
+  switch (Ex->getStmtClass()) {
+    default:
+      break;
+    case Stmt::DeclRefExprClass: {
+      const DeclRefExpr *DR = cast<DeclRefExpr>(Ex);
+      if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+        os << " (" << (loadedFrom ? "loaded from" : "from")
+           << " variable '" <<  VD->getName() << "')";
+        Ranges.push_back(DR->getSourceRange());
+      }
+      break;
+    }
+    case Stmt::MemberExprClass: {
+      const MemberExpr *ME = cast<MemberExpr>(Ex);
+      os << " (" << (loadedFrom ? "loaded from" : "via")
+         << " field '" << ME->getMemberNameInfo() << "')";
+      SourceLocation L = ME->getMemberLoc();
+      Ranges.push_back(SourceRange(L, L));
+      break;
+    }
+    case Stmt::ObjCIvarRefExprClass: {
+      const ObjCIvarRefExpr *IV = cast<ObjCIvarRefExpr>(Ex);
+      os << " (" << (loadedFrom ? "loaded from" : "via")
+         << " ivar '" << IV->getDecl()->getName() << "')";
+      SourceLocation L = IV->getLocation();
+      Ranges.push_back(SourceRange(L, L));
+      break;
+    }    
+  }
+}
+
+void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
+                                   CheckerContext &C, bool IsBind) const {
+  // Generate an error node.
+  ExplodedNode *N = C.generateSink(State);
+  if (!N)
+    return;
+
+  // We know that 'location' cannot be non-null.  This is what
+  // we call an "explicit" null dereference.
+  if (!BT_null)
+    BT_null.reset(new BuiltinBug("Dereference of null pointer"));
+
+  SmallString<100> buf;
+  llvm::raw_svector_ostream os(buf);
+
+  SmallVector<SourceRange, 2> Ranges;
+
+  // Walk through lvalue casts to get the original expression
+  // that syntactically caused the load.
+  if (const Expr *expr = dyn_cast<Expr>(S))
+    S = expr->IgnoreParenLValueCasts();
+
+  if (IsBind) {
+    if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
+      if (BO->isAssignmentOp())
+        S = BO->getRHS();
+    } else if (const DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
+      assert(DS->isSingleDecl() && "We process decls one by one");
+      if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl()))
+        if (const Expr *Init = VD->getAnyInitializer())
+          S = Init;
+    }
+  }
+
+  switch (S->getStmtClass()) {
+  case Stmt::ArraySubscriptExprClass: {
+    os << "Array access";
+    const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(S);
+    AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
+                   State.getPtr(), N->getLocationContext());
+    os << " results in a null pointer dereference";
+    break;
+  }
+  case Stmt::UnaryOperatorClass: {
+    os << "Dereference of null pointer";
+    const UnaryOperator *U = cast<UnaryOperator>(S);
+    AddDerefSource(os, Ranges, U->getSubExpr()->IgnoreParens(),
+                   State.getPtr(), N->getLocationContext(), true);
+    break;
+  }
+  case Stmt::MemberExprClass: {
+    const MemberExpr *M = cast<MemberExpr>(S);
+    if (M->isArrow() || bugreporter::isDeclRefExprToReference(M->getBase())) {
+      os << "Access to field '" << M->getMemberNameInfo()
+         << "' results in a dereference of a null pointer";
+      AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(),
+                     State.getPtr(), N->getLocationContext(), true);
+    }
+    break;
+  }
+  case Stmt::ObjCIvarRefExprClass: {
+    const ObjCIvarRefExpr *IV = cast<ObjCIvarRefExpr>(S);
+    os << "Access to instance variable '" << *IV->getDecl()
+       << "' results in a dereference of a null pointer";
+    AddDerefSource(os, Ranges, IV->getBase()->IgnoreParenCasts(),
+                   State.getPtr(), N->getLocationContext(), true);
+    break;
+  }
+  default:
+    break;
+  }
+
+  os.flush();
+  BugReport *report =
+    new BugReport(*BT_null,
+                  buf.empty() ? BT_null->getDescription() : buf.str(),
+                  N);
+
+  bugreporter::trackNullOrUndefValue(N, bugreporter::getDerefExpr(S), *report);
+
+  for (SmallVectorImpl<SourceRange>::iterator
+       I = Ranges.begin(), E = Ranges.end(); I!=E; ++I)
+    report->addRange(*I);
+
+  C.emitReport(report);
+}
+
+void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
+                                       CheckerContext &C) const {
+  // Check for dereference of an undefined value.
+  if (l.isUndef()) {
+    if (ExplodedNode *N = C.generateSink()) {
+      if (!BT_undef)
+        BT_undef.reset(new BuiltinBug("Dereference of undefined pointer value"));
+
+      BugReport *report =
+        new BugReport(*BT_undef, BT_undef->getDescription(), N);
+      bugreporter::trackNullOrUndefValue(N, bugreporter::getDerefExpr(S),
+                                         *report);
+      C.emitReport(report);
+    }
+    return;
+  }
+
+  DefinedOrUnknownSVal location = l.castAs<DefinedOrUnknownSVal>();
+
+  // Check for null dereferences.
+  if (!location.getAs<Loc>())
+    return;
+
+  ProgramStateRef state = C.getState();
+
+  ProgramStateRef notNullState, nullState;
+  llvm::tie(notNullState, nullState) = state->assume(location);
+
+  // The explicit NULL case.
+  if (nullState) {
+    if (!notNullState) {
+      reportBug(nullState, S, C);
+      return;
+    }
+
+    // Otherwise, we have the case where the location could either be
+    // null or not-null.  Record the error node as an "implicit" null
+    // dereference.
+    if (ExplodedNode *N = C.generateSink(nullState)) {
+      ImplicitNullDerefEvent event = { l, isLoad, N, &C.getBugReporter() };
+      dispatchEvent(event);
+    }
+  }
+
+  // From this point forward, we know that the location is not null.
+  C.addTransition(notNullState);
+}
+
+void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
+                                   CheckerContext &C) const {
+  // If we're binding to a reference, check if the value is known to be null.
+  if (V.isUndef())
+    return;
+
+  const MemRegion *MR = L.getAsRegion();
+  const TypedValueRegion *TVR = dyn_cast_or_null<TypedValueRegion>(MR);
+  if (!TVR)
+    return;
+
+  if (!TVR->getValueType()->isReferenceType())
+    return;
+
+  ProgramStateRef State = C.getState();
+
+  ProgramStateRef StNonNull, StNull;
+  llvm::tie(StNonNull, StNull) =
+      State->assume(V.castAs<DefinedOrUnknownSVal>());
+
+  if (StNull) {
+    if (!StNonNull) {
+      reportBug(StNull, S, C, /*isBind=*/true);
+      return;
+    }
+
+    // At this point the value could be either null or non-null.
+    // Record this as an "implicit" null dereference.
+    if (ExplodedNode *N = C.generateSink(StNull)) {
+      ImplicitNullDerefEvent event = { V, /*isLoad=*/true, N,
+                                       &C.getBugReporter() };
+      dispatchEvent(event);
+    }
+  }
+
+  // Unlike a regular null dereference, initializing a reference with a
+  // dereferenced null pointer does not actually cause a runtime exception in
+  // Clang's implementation of references.
+  //
+  //   int &r = *p; // safe??
+  //   if (p != NULL) return; // uh-oh
+  //   r = 5; // trap here
+  //
+  // The standard says this is invalid as soon as we try to create a "null
+  // reference" (there is no such thing), but turning this into an assumption
+  // that 'p' is never null will not match our actual runtime behavior.
+  // So we do not record this assumption, allowing us to warn on the last line
+  // of this example.
+  //
+  // We do need to add a transition because we may have generated a sink for
+  // the "implicit" null dereference.
+  C.addTransition(State, this);
+}
+
+void ento::registerDereferenceChecker(CheckerManager &mgr) {
+  mgr.registerChecker<DereferenceChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
new file mode 100644
index 0000000..6d3dd1e
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp
@@ -0,0 +1,254 @@
+//=- DirectIvarAssignment.cpp - Check rules on ObjC properties -*- C++ ----*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  Check that Objective-C properties are set with the setter, not through a
+//      direct assignment.
+//
+//  Two versions of a checker exist: one that checks all methods and the other
+//      that only checks the methods annotated with
+//      __attribute__((annotate("objc_no_direct_instance_variable_assignment")))
+//
+//  The checker does not warn about assignments to Ivars, annotated with
+//       __attribute__((annotate("objc_allow_direct_instance_variable_assignment"))). This
+//      annotation serves as a false positive suppression mechanism for the
+//      checker. The annotation is allowed on properties and Ivars.
+//
+//===----------------------------------------------------------------------===//
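+//
+//  Illustrative example (an editorial sketch, not part of the original file;
+//      the class and property names are made up):
+//
+//    @interface Foo : NSObject
+//    @property (nonatomic, strong) NSString *title;
+//    @end
+//
+//    @implementation Foo
+//    - (void)updateTitle:(NSString *)s {
+//      _title = s;  // warning: Direct assignment to an instance variable
+//                   //          backing a property; use the setter instead
+//    }
+//    @end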
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/DenseMap.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+/// The default method filter, which is used to filter out the methods on which
+/// the check should not be performed.
+///
+/// Checks for the init, dealloc, and any other functions that might be allowed
+/// to perform direct instance variable assignment based on their name.
+struct MethodFilter {
+  virtual ~MethodFilter() {}
+  virtual bool operator()(ObjCMethodDecl *M) {
+    if (M->getMethodFamily() == OMF_init ||
+        M->getMethodFamily() == OMF_dealloc ||
+        M->getMethodFamily() == OMF_copy ||
+        M->getMethodFamily() == OMF_mutableCopy ||
+        M->getSelector().getNameForSlot(0).find("init") != StringRef::npos ||
+        M->getSelector().getNameForSlot(0).find("Init") != StringRef::npos)
+      return true;
+    return false;
+  }
+};
+
+static MethodFilter DefaultMethodFilter;
+
+class DirectIvarAssignment :
+  public Checker<check::ASTDecl<ObjCImplementationDecl> > {
+
+  typedef llvm::DenseMap<const ObjCIvarDecl*,
+                         const ObjCPropertyDecl*> IvarToPropertyMapTy;
+
+  /// A helper class, which walks the AST and locates all assignments to ivars
+  /// in the given function.
+  class MethodCrawler : public ConstStmtVisitor<MethodCrawler> {
+    const IvarToPropertyMapTy &IvarToPropMap;
+    const ObjCMethodDecl *MD;
+    const ObjCInterfaceDecl *InterfD;
+    BugReporter &BR;
+    LocationOrAnalysisDeclContext DCtx;
+
+  public:
+    MethodCrawler(const IvarToPropertyMapTy &InMap, const ObjCMethodDecl *InMD,
+        const ObjCInterfaceDecl *InID,
+        BugReporter &InBR, AnalysisDeclContext *InDCtx)
+    : IvarToPropMap(InMap), MD(InMD), InterfD(InID), BR(InBR), DCtx(InDCtx) {}
+
+    void VisitStmt(const Stmt *S) { VisitChildren(S); }
+
+    void VisitBinaryOperator(const BinaryOperator *BO);
+
+    void VisitChildren(const Stmt *S) {
+      for (Stmt::const_child_range I = S->children(); I; ++I)
+        if (*I)
+         this->Visit(*I);
+    }
+  };
+
+public:
+  MethodFilter *ShouldSkipMethod;
+
+  DirectIvarAssignment() : ShouldSkipMethod(&DefaultMethodFilter) {}
+
+  void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& Mgr,
+                    BugReporter &BR) const;
+};
+
+static const ObjCIvarDecl *findPropertyBackingIvar(const ObjCPropertyDecl *PD,
+                                               const ObjCInterfaceDecl *InterD,
+                                               ASTContext &Ctx) {
+  // Check for synthesized ivars.
+  ObjCIvarDecl *ID = PD->getPropertyIvarDecl();
+  if (ID)
+    return ID;
+
+  ObjCInterfaceDecl *NonConstInterD = const_cast<ObjCInterfaceDecl*>(InterD);
+
+  // Check for existing "_PropName".
+  ID = NonConstInterD->lookupInstanceVariable(PD->getDefaultSynthIvarName(Ctx));
+  if (ID)
+    return ID;
+
+  // Check for existing "PropName".
+  IdentifierInfo *PropIdent = PD->getIdentifier();
+  ID = NonConstInterD->lookupInstanceVariable(PropIdent);
+
+  return ID;
+}
+
+void DirectIvarAssignment::checkASTDecl(const ObjCImplementationDecl *D,
+                                       AnalysisManager& Mgr,
+                                       BugReporter &BR) const {
+  const ObjCInterfaceDecl *InterD = D->getClassInterface();
+
+
+  IvarToPropertyMapTy IvarToPropMap;
+
+  // Find all properties for this class.
+  for (ObjCInterfaceDecl::prop_iterator I = InterD->prop_begin(),
+      E = InterD->prop_end(); I != E; ++I) {
+    ObjCPropertyDecl *PD = *I;
+
+    // Find the corresponding IVar.
+    const ObjCIvarDecl *ID = findPropertyBackingIvar(PD, InterD,
+                                                     Mgr.getASTContext());
+
+    if (!ID)
+      continue;
+
+    // Store the IVar to property mapping.
+    IvarToPropMap[ID] = PD;
+  }
+
+  if (IvarToPropMap.empty())
+    return;
+
+  for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(),
+      E = D->instmeth_end(); I != E; ++I) {
+
+    ObjCMethodDecl *M = *I;
+    AnalysisDeclContext *DCtx = Mgr.getAnalysisDeclContext(M);
+
+    if ((*ShouldSkipMethod)(M))
+      continue;
+
+    const Stmt *Body = M->getBody();
+    assert(Body);
+
+    MethodCrawler MC(IvarToPropMap, M->getCanonicalDecl(), InterD, BR, DCtx);
+    MC.VisitStmt(Body);
+  }
+}
+
+static bool isAnnotatedToAllowDirectAssignment(const Decl *D) {
+  for (specific_attr_iterator<AnnotateAttr>
+       AI = D->specific_attr_begin<AnnotateAttr>(),
+       AE = D->specific_attr_end<AnnotateAttr>(); AI != AE; ++AI) {
+    const AnnotateAttr *Ann = *AI;
+    if (Ann->getAnnotation() ==
+        "objc_allow_direct_instance_variable_assignment")
+      return true;
+  }
+  return false;
+}
+
+void DirectIvarAssignment::MethodCrawler::VisitBinaryOperator(
+                                                    const BinaryOperator *BO) {
+  if (!BO->isAssignmentOp())
+    return;
+
+  const ObjCIvarRefExpr *IvarRef =
+          dyn_cast<ObjCIvarRefExpr>(BO->getLHS()->IgnoreParenCasts());
+
+  if (!IvarRef)
+    return;
+
+  if (const ObjCIvarDecl *D = IvarRef->getDecl()) {
+    IvarToPropertyMapTy::const_iterator I = IvarToPropMap.find(D);
+
+    if (I != IvarToPropMap.end()) {
+      const ObjCPropertyDecl *PD = I->second;
+      // Skip warnings on Ivars, annotated with
+      // objc_allow_direct_instance_variable_assignment. This annotation serves
+      // as a false positive suppression mechanism for the checker. The
+      // annotation is allowed on properties and ivars.
+      if (isAnnotatedToAllowDirectAssignment(PD) ||
+          isAnnotatedToAllowDirectAssignment(D))
+        return;
+
+      ObjCMethodDecl *GetterMethod =
+          InterfD->getInstanceMethod(PD->getGetterName());
+      ObjCMethodDecl *SetterMethod =
+          InterfD->getInstanceMethod(PD->getSetterName());
+
+      if (SetterMethod && SetterMethod->getCanonicalDecl() == MD)
+        return;
+
+      if (GetterMethod && GetterMethod->getCanonicalDecl() == MD)
+        return;
+
+      BR.EmitBasicReport(MD,
+          "Property access",
+          categories::CoreFoundationObjectiveC,
+          "Direct assignment to an instance variable backing a property; "
+          "use the setter instead", PathDiagnosticLocation(IvarRef,
+                                                          BR.getSourceManager(),
+                                                          DCtx));
+    }
+  }
+}
+}
+
+// Register the checker that checks for direct accesses in all functions,
+// except for the initialization and copy routines.
+void ento::registerDirectIvarAssignment(CheckerManager &mgr) {
+  mgr.registerChecker<DirectIvarAssignment>();
+}
+
+// Register the checker that checks for direct accesses in functions annotated
+// with __attribute__((annotate("objc_no_direct_instance_variable_assignment"))).
+namespace {
+struct InvalidatorMethodFilter : MethodFilter {
+  virtual ~InvalidatorMethodFilter() {}
+  virtual bool operator()(ObjCMethodDecl *M) {
+    for (specific_attr_iterator<AnnotateAttr>
+         AI = M->specific_attr_begin<AnnotateAttr>(),
+         AE = M->specific_attr_end<AnnotateAttr>(); AI != AE; ++AI) {
+      const AnnotateAttr *Ann = *AI;
+      if (Ann->getAnnotation() == "objc_no_direct_instance_variable_assignment")
+        return false;
+    }
+    return true;
+  }
+};
+
+InvalidatorMethodFilter AttrFilter;
+}
+
+void ento::registerDirectIvarAssignmentForAnnotatedFunctions(
+    CheckerManager &mgr) {
+  mgr.registerChecker<DirectIvarAssignment>()->ShouldSkipMethod = &AttrFilter;
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
new file mode 100644
index 0000000..93daf94
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -0,0 +1,92 @@
+//== DivZeroChecker.cpp - Division by zero checker --------------*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines DivZeroChecker, a builtin check in ExprEngine that performs
+// checks for division by zeros.
+//
+//===----------------------------------------------------------------------===//
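+//
+// Illustrative case (an editorial sketch, not part of the original file):
+//
+//   int f(int n) {
+//     if (n == 0)
+//       return 1 / n;  // warning: Division by zero
+//     return 1 / n;    // no warning: 'n' is known to be non-zero here
+//   }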
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class DivZeroChecker : public Checker< check::PreStmt<BinaryOperator> > {
+  mutable OwningPtr<BuiltinBug> BT;
+  void reportBug(const char *Msg,
+                 ProgramStateRef StateZero,
+                 CheckerContext &C) const ;
+public:
+  void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+};  
+} // end anonymous namespace
+
+void DivZeroChecker::reportBug(const char *Msg,
+                               ProgramStateRef StateZero,
+                               CheckerContext &C) const {
+  if (ExplodedNode *N = C.generateSink(StateZero)) {
+    if (!BT)
+      BT.reset(new BuiltinBug("Division by zero"));
+
+    BugReport *R = new BugReport(*BT, Msg, N);
+    bugreporter::trackNullOrUndefValue(N, bugreporter::GetDenomExpr(N), *R);
+    C.emitReport(R);
+  }
+}
+
+void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
+                                  CheckerContext &C) const {
+  BinaryOperator::Opcode Op = B->getOpcode();
+  if (Op != BO_Div &&
+      Op != BO_Rem &&
+      Op != BO_DivAssign &&
+      Op != BO_RemAssign)
+    return;
+
+  if (!B->getRHS()->getType()->isScalarType())
+    return;
+
+  SVal Denom = C.getState()->getSVal(B->getRHS(), C.getLocationContext());
+  Optional<DefinedSVal> DV = Denom.getAs<DefinedSVal>();
+
+  // Divide-by-undefined is handled by the generic checking for uses of
+  // undefined values.
+  if (!DV)
+    return;
+
+  // Check for divide by zero.
+  ConstraintManager &CM = C.getConstraintManager();
+  ProgramStateRef stateNotZero, stateZero;
+  llvm::tie(stateNotZero, stateZero) = CM.assumeDual(C.getState(), *DV);
+
+  if (!stateNotZero) {
+    assert(stateZero);
+    reportBug("Division by zero", stateZero, C);
+    return;
+  }
+
+  bool TaintedD = C.getState()->isTainted(*DV);
+  if ((stateNotZero && stateZero && TaintedD)) {
+    reportBug("Division by a tainted value, possibly zero", stateZero, C);
+    return;
+  }
+
+  // If we get here, then the denom should not be zero. We abandon the implicit
+  // zero denom case for now.
+  C.addTransition(stateNotZero);
+}
+
+void ento::registerDivZeroChecker(CheckerManager &mgr) {
+  mgr.registerChecker<DivZeroChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
new file mode 100644
index 0000000..759aa66
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -0,0 +1,281 @@
+//== DynamicTypePropagation.cpp ----------------------------------- -*- C++ -*--=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker defines the rules for dynamic type gathering and propagation.
+//
+//===----------------------------------------------------------------------===//
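+//
+// Illustrative effect (an editorial sketch, not part of the original file;
+// 'MyClass' is a made-up Objective-C class):
+//
+//   MyClass *obj = [[MyClass alloc] init];
+//
+// The checker records the dynamic type of the region returned by +alloc as
+// 'MyClass *', and -init propagates the receiver's dynamic type to its
+// result, so later message sends on 'obj' can be resolved more precisely by
+// the analyzer.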
+
+#include "ClangSACheckers.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class DynamicTypePropagation:
+    public Checker< check::PreCall,
+                    check::PostCall,
+                    check::PostStmt<ImplicitCastExpr>,
+                    check::PostStmt<CXXNewExpr> > {
+  const ObjCObjectType *getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
+                                                    CheckerContext &C) const;
+
+  /// \brief Return a better dynamic type if one can be derived from the cast.
+  const ObjCObjectPointerType *getBetterObjCType(const Expr *CastE,
+                                                 CheckerContext &C) const;
+public:
+  void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+  void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+  void checkPostStmt(const ImplicitCastExpr *CastE, CheckerContext &C) const;
+  void checkPostStmt(const CXXNewExpr *NewE, CheckerContext &C) const;
+};
+}
+
+static void recordFixedType(const MemRegion *Region, const CXXMethodDecl *MD,
+                            CheckerContext &C) {
+  assert(Region);
+  assert(MD);
+
+  ASTContext &Ctx = C.getASTContext();
+  QualType Ty = Ctx.getPointerType(Ctx.getRecordType(MD->getParent()));
+
+  ProgramStateRef State = C.getState();
+  State = State->setDynamicTypeInfo(Region, Ty, /*CanBeSubclass=*/false);
+  C.addTransition(State);
+  return;
+}
+
+void DynamicTypePropagation::checkPreCall(const CallEvent &Call,
+                                          CheckerContext &C) const {
+  if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
+    // C++11 [class.cdtor]p4: When a virtual function is called directly or
+    //   indirectly from a constructor or from a destructor, including during
+    //   the construction or destruction of the class’s non-static data members,
+    //   and the object to which the call applies is the object under
+    //   construction or destruction, the function called is the final overrider
+    //   in the constructor's or destructor's class and not one overriding it in
+    //   a more-derived class.
+
+    switch (Ctor->getOriginExpr()->getConstructionKind()) {
+    case CXXConstructExpr::CK_Complete:
+    case CXXConstructExpr::CK_Delegating:
+      // No additional type info necessary.
+      return;
+    case CXXConstructExpr::CK_NonVirtualBase:
+    case CXXConstructExpr::CK_VirtualBase:
+      if (const MemRegion *Target = Ctor->getCXXThisVal().getAsRegion())
+        recordFixedType(Target, Ctor->getDecl(), C);
+      return;
+    }
+
+    return;
+  }
+
+  if (const CXXDestructorCall *Dtor = dyn_cast<CXXDestructorCall>(&Call)) {
+    // C++11 [class.cdtor]p4 (see above)
+    if (!Dtor->isBaseDestructor())
+      return;
+
+    const MemRegion *Target = Dtor->getCXXThisVal().getAsRegion();
+    if (!Target)
+      return;
+
+    const Decl *D = Dtor->getDecl();
+    if (!D)
+      return;
+
+    recordFixedType(Target, cast<CXXDestructorDecl>(D), C);
+    return;
+  }
+}
+
+void DynamicTypePropagation::checkPostCall(const CallEvent &Call,
+                                           CheckerContext &C) const {
+  // We can obtain perfect type info for return values from some calls.
+  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
+
+    // Get the returned value if it's a region.
+    const MemRegion *RetReg = Call.getReturnValue().getAsRegion();
+    if (!RetReg)
+      return;
+
+    ProgramStateRef State = C.getState();
+    const ObjCMethodDecl *D = Msg->getDecl();
+    
+    if (D && D->hasRelatedResultType()) {
+      switch (Msg->getMethodFamily()) {
+      default:
+        break;
+
+      // We assume that the type of the object returned by alloc and new is a
+      // pointer to an object of the class specified in the receiver of the
+      // message.
+      case OMF_alloc:
+      case OMF_new: {
+        // Get the type of object that will get created.
+        const ObjCMessageExpr *MsgE = Msg->getOriginExpr();
+        const ObjCObjectType *ObjTy = getObjectTypeForAllocAndNew(MsgE, C);
+        if (!ObjTy)
+          return;
+        QualType DynResTy =
+                 C.getASTContext().getObjCObjectPointerType(QualType(ObjTy, 0));
+        C.addTransition(State->setDynamicTypeInfo(RetReg, DynResTy, false));
+        break;
+      }
+      case OMF_init: {
+        // Assume the result of the init method has the same dynamic type as
+        // the receiver, and propagate the dynamic type info.
+        const MemRegion *RecReg = Msg->getReceiverSVal().getAsRegion();
+        if (!RecReg)
+          return;
+        DynamicTypeInfo RecDynType = State->getDynamicTypeInfo(RecReg);
+        C.addTransition(State->setDynamicTypeInfo(RetReg, RecDynType));
+        break;
+      }
+      }
+    }
+    return;
+  }
+
+  if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
+    // We may need to undo the effects of our pre-call check.
+    switch (Ctor->getOriginExpr()->getConstructionKind()) {
+    case CXXConstructExpr::CK_Complete:
+    case CXXConstructExpr::CK_Delegating:
+      // No additional work necessary.
+      // Note: This will leave behind the actual type of the object for
+      // complete constructors, but arguably that's a good thing, since it
+      // means the dynamic type info will be correct even for objects
+      // constructed with operator new.
+      return;
+    case CXXConstructExpr::CK_NonVirtualBase:
+    case CXXConstructExpr::CK_VirtualBase:
+      if (const MemRegion *Target = Ctor->getCXXThisVal().getAsRegion()) {
+        // We just finished a base constructor. Now we can use the subclass's
+        // type when resolving virtual calls.
+        const Decl *D = C.getLocationContext()->getDecl();
+        recordFixedType(Target, cast<CXXConstructorDecl>(D), C);
+      }
+      return;
+    }
+  }
+}
+
+void DynamicTypePropagation::checkPostStmt(const ImplicitCastExpr *CastE,
+                                           CheckerContext &C) const {
+  // We only track dynamic type info for regions.
+  const MemRegion *ToR = C.getSVal(CastE).getAsRegion();
+  if (!ToR)
+    return;
+
+  switch (CastE->getCastKind()) {
+  default:
+    break;
+  case CK_BitCast:
+    // Only handle ObjCObjects for now.
+    if (const Type *NewTy = getBetterObjCType(CastE, C))
+      C.addTransition(C.getState()->setDynamicTypeInfo(ToR, QualType(NewTy,0)));
+    break;
+  }
+  return;
+}
+
+void DynamicTypePropagation::checkPostStmt(const CXXNewExpr *NewE,
+                                           CheckerContext &C) const {
+  if (NewE->isArray())
+    return;
+
+  // We only track dynamic type info for regions.
+  const MemRegion *MR = C.getSVal(NewE).getAsRegion();
+  if (!MR)
+    return;
+  
+  C.addTransition(C.getState()->setDynamicTypeInfo(MR, NewE->getType(),
+                                                   /*CanBeSubclass=*/false));
+}
+
+const ObjCObjectType *
+DynamicTypePropagation::getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
+                                                    CheckerContext &C) const {
+  if (MsgE->getReceiverKind() == ObjCMessageExpr::Class) {
+    if (const ObjCObjectType *ObjTy
+          = MsgE->getClassReceiver()->getAs<ObjCObjectType>())
+    return ObjTy;
+  }
+
+  if (MsgE->getReceiverKind() == ObjCMessageExpr::SuperClass) {
+    if (const ObjCObjectType *ObjTy
+          = MsgE->getSuperType()->getAs<ObjCObjectType>())
+      return ObjTy;
+  }
+
+  const Expr *RecE = MsgE->getInstanceReceiver();
+  if (!RecE)
+    return 0;
+
+  RecE= RecE->IgnoreParenImpCasts();
+  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(RecE)) {
+    const StackFrameContext *SFCtx = C.getStackFrame();
+    // Are we calling [self alloc]? If this is self, get the type of the
+    // enclosing ObjC class.
+    if (DRE->getDecl() == SFCtx->getSelfDecl()) {
+      if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(SFCtx->getDecl()))
+        if (const ObjCObjectType *ObjTy =
+            dyn_cast<ObjCObjectType>(MD->getClassInterface()->getTypeForDecl()))
+          return ObjTy;
+    }
+  }
+  return 0;
+}
+
+// Return a better dynamic type if one can be derived from the cast.
+// Compare the current dynamic type of the region and the new type to which we
+// are casting. If the new type is lower in the inheritance hierarchy, pick it.
+const ObjCObjectPointerType *
+DynamicTypePropagation::getBetterObjCType(const Expr *CastE,
+                                          CheckerContext &C) const {
+  const MemRegion *ToR = C.getSVal(CastE).getAsRegion();
+  assert(ToR);
+
+  // Get the old and new types.
+  const ObjCObjectPointerType *NewTy =
+      CastE->getType()->getAs<ObjCObjectPointerType>();
+  if (!NewTy)
+    return 0;
+  QualType OldDTy = C.getState()->getDynamicTypeInfo(ToR).getType();
+  if (OldDTy.isNull()) {
+    return NewTy;
+  }
+  const ObjCObjectPointerType *OldTy =
+    OldDTy->getAs<ObjCObjectPointerType>();
+  if (!OldTy)
+    return 0;
+
+  // If the old type is 'id', the new one is more precise.
+  if (OldTy->isObjCIdType() && !NewTy->isObjCIdType())
+    return NewTy;
+
+  // Return new if it's a subclass of old.
+  const ObjCInterfaceDecl *ToI = NewTy->getInterfaceDecl();
+  const ObjCInterfaceDecl *FromI = OldTy->getInterfaceDecl();
+  if (ToI && FromI && FromI->isSuperClassOf(ToI))
+    return NewTy;
+
+  return 0;
+}
+
+void ento::registerDynamicTypePropagation(CheckerManager &mgr) {
+  mgr.registerChecker<DynamicTypePropagation>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
new file mode 100644
index 0000000..810473f
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -0,0 +1,123 @@
+//==- ExprInspectionChecker.cpp - Used for regression tests ------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
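+//
+// Illustrative use in an analyzer regression test (an editorial sketch, not
+// part of the original file):
+//
+//   void clang_analyzer_eval(int);
+//
+//   void test(int x) {
+//     clang_analyzer_eval(x > 0);    // expected-warning{{UNKNOWN}}
+//     if (x > 0)
+//       clang_analyzer_eval(x > 0);  // expected-warning{{TRUE}}
+//   }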
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ExprInspectionChecker : public Checker< eval::Call > {
+  mutable OwningPtr<BugType> BT;
+
+  void analyzerEval(const CallExpr *CE, CheckerContext &C) const;
+  void analyzerCheckInlined(const CallExpr *CE, CheckerContext &C) const;
+
+  typedef void (ExprInspectionChecker::*FnCheck)(const CallExpr *,
+                                                 CheckerContext &C) const;
+
+public:
+  bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+};
+}
+
+bool ExprInspectionChecker::evalCall(const CallExpr *CE,
+                                     CheckerContext &C) const {
+  // These checks should have no effect on the surrounding environment
+  // (globals should not be invalidated, etc), hence the use of evalCall.
+  FnCheck Handler = llvm::StringSwitch<FnCheck>(C.getCalleeName(CE))
+    .Case("clang_analyzer_eval", &ExprInspectionChecker::analyzerEval)
+    .Case("clang_analyzer_checkInlined",
+          &ExprInspectionChecker::analyzerCheckInlined)
+    .Default(0);
+
+  if (!Handler)
+    return false;
+
+  (this->*Handler)(CE, C);
+  return true;
+}
+
+static const char *getArgumentValueString(const CallExpr *CE,
+                                          CheckerContext &C) {
+  if (CE->getNumArgs() == 0)
+    return "Missing assertion argument";
+
+  ExplodedNode *N = C.getPredecessor();
+  const LocationContext *LC = N->getLocationContext();
+  ProgramStateRef State = N->getState();
+
+  const Expr *Assertion = CE->getArg(0);
+  SVal AssertionVal = State->getSVal(Assertion, LC);
+
+  if (AssertionVal.isUndef())
+    return "UNDEFINED";
+
+  ProgramStateRef StTrue, StFalse;
+  llvm::tie(StTrue, StFalse) =
+    State->assume(AssertionVal.castAs<DefinedOrUnknownSVal>());
+
+  if (StTrue) {
+    if (StFalse)
+      return "UNKNOWN";
+    else
+      return "TRUE";
+  } else {
+    if (StFalse)
+      return "FALSE";
+    else
+      llvm_unreachable("Invalid constraint; neither true or false.");
+  }
+}
+
+void ExprInspectionChecker::analyzerEval(const CallExpr *CE,
+                                         CheckerContext &C) const {
+  ExplodedNode *N = C.getPredecessor();
+  const LocationContext *LC = N->getLocationContext();
+
+  // A specific instantiation of an inlined function may have more constrained
+  // values than can generally be assumed. Skip the check.
+  if (LC->getCurrentStackFrame()->getParent() != 0)
+    return;
+
+  if (!BT)
+    BT.reset(new BugType("Checking analyzer assumptions", "debug"));
+
+  BugReport *R = new BugReport(*BT, getArgumentValueString(CE, C), N);
+  C.emitReport(R);
+}
+
+void ExprInspectionChecker::analyzerCheckInlined(const CallExpr *CE,
+                                                 CheckerContext &C) const {
+  ExplodedNode *N = C.getPredecessor();
+  const LocationContext *LC = N->getLocationContext();
+
+  // An inlined function could conceivably also be analyzed as a top-level
+  // function. We ignore this case and only emit a message (TRUE or FALSE)
+  // when we are analyzing it as an inlined function. This means that
+  // clang_analyzer_checkInlined(true) should always print TRUE, but
+  // clang_analyzer_checkInlined(false) should never actually print anything.
+  if (LC->getCurrentStackFrame()->getParent() == 0)
+    return;
+
+  if (!BT)
+    BT.reset(new BugType("Checking analyzer assumptions", "debug"));
+
+  BugReport *R = new BugReport(*BT, getArgumentValueString(CE, C), N);
+  C.emitReport(R);
+}
+
+void ento::registerExprInspectionChecker(CheckerManager &Mgr) {
+  Mgr.registerChecker<ExprInspectionChecker>();
+}
+
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
new file mode 100644
index 0000000..085a991
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
@@ -0,0 +1,67 @@
+//=== FixedAddressChecker.cpp - Fixed address usage checker ----*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines FixedAddressChecker, a builtin checker that checks for
+// assignment of a fixed address to a pointer.
+// This check corresponds to CWE-587.
+//
+//===----------------------------------------------------------------------===//
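+//
+// Illustrative case (an editorial sketch, not part of the original file):
+//
+//   int *p;
+//   p = (int *)0x10000;  // warning: Using a fixed address is not portable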
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class FixedAddressChecker 
+  : public Checker< check::PreStmt<BinaryOperator> > {
+  mutable OwningPtr<BuiltinBug> BT;
+
+public:
+  void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+};
+}
+
+void FixedAddressChecker::checkPreStmt(const BinaryOperator *B,
+                                       CheckerContext &C) const {
+  // Using a fixed address is not portable because that address will probably
+  // not be valid in all environments or platforms.
+
+  if (B->getOpcode() != BO_Assign)
+    return;
+
+  QualType T = B->getType();
+  if (!T->isPointerType())
+    return;
+
+  ProgramStateRef state = C.getState();
+  SVal RV = state->getSVal(B->getRHS(), C.getLocationContext());
+
+  if (!RV.isConstant() || RV.isZeroConstant())
+    return;
+
+  if (ExplodedNode *N = C.addTransition()) {
+    if (!BT)
+      BT.reset(new BuiltinBug("Use fixed address", 
+                          "Using a fixed address is not portable because that "
+                          "address will probably not be valid in all "
+                          "environments or platforms."));
+    BugReport *R = new BugReport(*BT, BT->getDescription(), N);
+    R->addRange(B->getRHS()->getSourceRange());
+    C.emitReport(R);
+  }
+}
+
+void ento::registerFixedAddressChecker(CheckerManager &mgr) {
+  mgr.registerChecker<FixedAddressChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
new file mode 100644
index 0000000..c67c597
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -0,0 +1,744 @@
+//== GenericTaintChecker.cpp ----------------------------------- -*- C++ -*--=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker defines the attack surface for generic taint propagation.
+//
+// The taint information produced by it might be useful to other checkers. For
+// example, checkers should report errors which involve tainted data more
+// aggressively, even if the involved symbols are underconstrained.
+//
+//===----------------------------------------------------------------------===//
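+//
+// Illustrative flow this checker is meant to catch (an editorial sketch, not
+// part of the original file):
+//
+//   char cmd[64];
+//   scanf("%63s", cmd);  // 'cmd' becomes tainted (attack surface)
+//   system(cmd);         // warning: Untrusted data is passed to a system call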
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include <climits>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class GenericTaintChecker : public Checker< check::PostStmt<CallExpr>,
+                                            check::PreStmt<CallExpr> > {
+public:
+  static void *getTag() { static int Tag; return &Tag; }
+
+  void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+  void checkPostStmt(const DeclRefExpr *DRE, CheckerContext &C) const;
+
+  void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+private:
+  static const unsigned InvalidArgIndex = UINT_MAX;
+  /// Denotes the return value.
+  static const unsigned ReturnValueIndex = UINT_MAX - 1;
+
+  mutable OwningPtr<BugType> BT;
+  inline void initBugType() const {
+    if (!BT)
+      BT.reset(new BugType("Use of Untrusted Data", "Untrusted Data"));
+  }
+
+  /// \brief Catch taint related bugs. Check if tainted data is passed to a
+  /// system call etc.
+  bool checkPre(const CallExpr *CE, CheckerContext &C) const;
+
+  /// \brief Add taint sources on a pre-visit.
+  void addSourcesPre(const CallExpr *CE, CheckerContext &C) const;
+
+  /// \brief Propagate taint generated at pre-visit.
+  bool propagateFromPre(const CallExpr *CE, CheckerContext &C) const;
+
+  /// \brief Add taint sources on a post visit.
+  void addSourcesPost(const CallExpr *CE, CheckerContext &C) const;
+
+  /// Check if the region the expression evaluates to is the standard input,
+  /// and thus is tainted.
+  static bool isStdin(const Expr *E, CheckerContext &C);
+
+  /// \brief Given a pointer argument, get the symbol of the value it contains
+  /// (points to).
+  static SymbolRef getPointedToSymbol(CheckerContext &C, const Expr *Arg);
+
+  /// Functions defining the attack surface.
+  typedef ProgramStateRef (GenericTaintChecker::*FnCheck)(const CallExpr *,
+                                                       CheckerContext &C) const;
+  ProgramStateRef postScanf(const CallExpr *CE, CheckerContext &C) const;
+  ProgramStateRef postSocket(const CallExpr *CE, CheckerContext &C) const;
+  ProgramStateRef postRetTaint(const CallExpr *CE, CheckerContext &C) const;
+
+  /// Taint the scanned input if the file is tainted.
+  ProgramStateRef preFscanf(const CallExpr *CE, CheckerContext &C) const;
+
+  /// Check for CWE-134: Uncontrolled Format String.
+  static const char MsgUncontrolledFormatString[];
+  bool checkUncontrolledFormatString(const CallExpr *CE,
+                                     CheckerContext &C) const;
+
+  /// Check for:
+  /// CERT/STR02-C. "Sanitize data passed to complex subsystems"
+  /// CWE-78, "Failure to Sanitize Data into an OS Command"
+  static const char MsgSanitizeSystemArgs[];
+  bool checkSystemCall(const CallExpr *CE, StringRef Name,
+                       CheckerContext &C) const;
+
+  /// Check if tainted data is used as a buffer size in strn.. functions,
+  /// and allocators.
+  static const char MsgTaintedBufferSize[];
+  bool checkTaintedBufferSize(const CallExpr *CE, const FunctionDecl *FDecl,
+                              CheckerContext &C) const;
+
+  /// Generate a report if the expression is tainted or points to tainted data.
+  bool generateReportIfTainted(const Expr *E, const char Msg[],
+                               CheckerContext &C) const;
+                               
+  
+  typedef SmallVector<unsigned, 2> ArgVector;
+
+  /// \brief A struct used to specify taint propagation rules for a function.
+  ///
+  /// If any of the possible taint source arguments is tainted, all of the
+  /// destination arguments should also be tainted. Use InvalidArgIndex in the
+  /// src list to specify that all of the arguments can introduce taint. Use
+  /// InvalidArgIndex in the dst arguments to signify that all the non-const
+  /// pointer and reference arguments might be tainted on return. If
+  /// ReturnValueIndex is added to the dst list, the return value will be
+  /// tainted.
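+  ///
+  /// For example (editorial note, derived from the rule table below), the
+  /// rule registered for 'read' is TaintPropagationRule(0, 2, 1, true): if
+  /// the file descriptor (argument 0) or the byte count (argument 2) is
+  /// tainted, then the buffer (argument 1) and the return value are marked
+  /// tainted.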
+  struct TaintPropagationRule {
+    /// List of arguments which can be taint sources and should be checked.
+    ArgVector SrcArgs;
+    /// List of arguments which should be tainted on function return.
+    ArgVector DstArgs;
+    // TODO: Check if using other data structures would be more optimal.
+
+    TaintPropagationRule() {}
+
+    TaintPropagationRule(unsigned SArg,
+                         unsigned DArg, bool TaintRet = false) {
+      SrcArgs.push_back(SArg);
+      DstArgs.push_back(DArg);
+      if (TaintRet)
+        DstArgs.push_back(ReturnValueIndex);
+    }
+
+    TaintPropagationRule(unsigned SArg1, unsigned SArg2,
+                         unsigned DArg, bool TaintRet = false) {
+      SrcArgs.push_back(SArg1);
+      SrcArgs.push_back(SArg2);
+      DstArgs.push_back(DArg);
+      if (TaintRet)
+        DstArgs.push_back(ReturnValueIndex);
+    }
+
+    /// Get the propagation rule for a given function.
+    static TaintPropagationRule
+      getTaintPropagationRule(const FunctionDecl *FDecl,
+                              StringRef Name,
+                              CheckerContext &C);
+
+    inline void addSrcArg(unsigned A) { SrcArgs.push_back(A); }
+    inline void addDstArg(unsigned A)  { DstArgs.push_back(A); }
+
+    inline bool isNull() const { return SrcArgs.empty(); }
+
+    inline bool isDestinationArgument(unsigned ArgNum) const {
+      return (std::find(DstArgs.begin(),
+                        DstArgs.end(), ArgNum) != DstArgs.end());
+    }
+
+    static inline bool isTaintedOrPointsToTainted(const Expr *E,
+                                                  ProgramStateRef State,
+                                                  CheckerContext &C) {
+      return (State->isTainted(E, C.getLocationContext()) || isStdin(E, C) ||
+              (E->getType().getTypePtr()->isPointerType() &&
+               State->isTainted(getPointedToSymbol(C, E))));
+    }
+
+    /// \brief Pre-process a function which propagates taint according to the
+    /// taint rule.
+    ProgramStateRef process(const CallExpr *CE, CheckerContext &C) const;
+
+  };
+};
+
+const unsigned GenericTaintChecker::ReturnValueIndex;
+const unsigned GenericTaintChecker::InvalidArgIndex;
+
+const char GenericTaintChecker::MsgUncontrolledFormatString[] =
+  "Untrusted data is used as a format string "
+  "(CWE-134: Uncontrolled Format String)";
+
+const char GenericTaintChecker::MsgSanitizeSystemArgs[] =
+  "Untrusted data is passed to a system call "
+  "(CERT/STR02-C. Sanitize data passed to complex subsystems)";
+
+const char GenericTaintChecker::MsgTaintedBufferSize[] =
+  "Untrusted data is used to specify the buffer size "
+  "(CERT/STR31-C. Guarantee that storage for strings has sufficient space for "
+  "character data and the null terminator)";
+
+} // end of anonymous namespace
+
+/// A set used to pass information from the call pre-visit to the call
+/// post-visit. The values are unsigned integers: either ReturnValueIndex or
+/// the index of a pointer/reference argument that points to data which
+/// should be tainted on return.
+REGISTER_SET_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, unsigned)
+
+GenericTaintChecker::TaintPropagationRule
+GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
+                                                     const FunctionDecl *FDecl,
+                                                     StringRef Name,
+                                                     CheckerContext &C) {
+  // TODO: Currently, we might lose precision here: we always mark a return
+  // value as tainted even if it's just a pointer pointing to tainted data.
+
+  // Check for exact name match for functions without builtin substitutes.
+  TaintPropagationRule Rule = llvm::StringSwitch<TaintPropagationRule>(Name)
+    .Case("atoi", TaintPropagationRule(0, ReturnValueIndex))
+    .Case("atol", TaintPropagationRule(0, ReturnValueIndex))
+    .Case("atoll", TaintPropagationRule(0, ReturnValueIndex))
+    .Case("getc", TaintPropagationRule(0, ReturnValueIndex))
+    .Case("fgetc", TaintPropagationRule(0, ReturnValueIndex))
+    .Case("getc_unlocked", TaintPropagationRule(0, ReturnValueIndex))
+    .Case("getw", TaintPropagationRule(0, ReturnValueIndex))
+    .Case("toupper", TaintPropagationRule(0, ReturnValueIndex))
+    .Case("tolower", TaintPropagationRule(0, ReturnValueIndex))
+    .Case("strchr", TaintPropagationRule(0, ReturnValueIndex))
+    .Case("strrchr", TaintPropagationRule(0, ReturnValueIndex))
+    .Case("read", TaintPropagationRule(0, 2, 1, true))
+    .Case("pread", TaintPropagationRule(InvalidArgIndex, 1, true))
+    .Case("gets", TaintPropagationRule(InvalidArgIndex, 0, true))
+    .Case("fgets", TaintPropagationRule(2, 0, true))
+    .Case("getline", TaintPropagationRule(2, 0))
+    .Case("getdelim", TaintPropagationRule(3, 0))
+    .Case("fgetln", TaintPropagationRule(0, ReturnValueIndex))
+    .Default(TaintPropagationRule());
+
+  if (!Rule.isNull())
+    return Rule;
+
+  // Check if it's one of the memory setting/copying functions.
+  // This check is specialized but faster than calling isCLibraryFunction.
+  unsigned BId = 0;
+  if ( (BId = FDecl->getMemoryFunctionKind()) )
+    switch(BId) {
+    case Builtin::BImemcpy:
+    case Builtin::BImemmove:
+    case Builtin::BIstrncpy:
+    case Builtin::BIstrncat:
+      return TaintPropagationRule(1, 2, 0, true);
+    case Builtin::BIstrlcpy:
+    case Builtin::BIstrlcat:
+      return TaintPropagationRule(1, 2, 0, false);
+    case Builtin::BIstrndup:
+      return TaintPropagationRule(0, 1, ReturnValueIndex);
+
+    default:
+      break;
+    };
+
+  // Process all other functions which could be defined as builtins.
+  if (Rule.isNull()) {
+    if (C.isCLibraryFunction(FDecl, "snprintf") ||
+        C.isCLibraryFunction(FDecl, "sprintf"))
+      return TaintPropagationRule(InvalidArgIndex, 0, true);
+    else if (C.isCLibraryFunction(FDecl, "strcpy") ||
+             C.isCLibraryFunction(FDecl, "stpcpy") ||
+             C.isCLibraryFunction(FDecl, "strcat"))
+      return TaintPropagationRule(1, 0, true);
+    else if (C.isCLibraryFunction(FDecl, "bcopy"))
+      return TaintPropagationRule(0, 2, 1, false);
+    else if (C.isCLibraryFunction(FDecl, "strdup") ||
+             C.isCLibraryFunction(FDecl, "strdupa"))
+      return TaintPropagationRule(0, ReturnValueIndex);
+    else if (C.isCLibraryFunction(FDecl, "wcsdup"))
+      return TaintPropagationRule(0, ReturnValueIndex);
+  }
+
+  // The following functions are skipped because they might be used for
+  // cleansing or for smart memory copying:
+  // - memccpy - copies until a given special character is hit.
+
+  return TaintPropagationRule();
+}
+
+void GenericTaintChecker::checkPreStmt(const CallExpr *CE,
+                                       CheckerContext &C) const {
+  // Check for errors first.
+  if (checkPre(CE, C))
+    return;
+
+  // Add taint second.
+  addSourcesPre(CE, C);
+}
+
+void GenericTaintChecker::checkPostStmt(const CallExpr *CE,
+                                        CheckerContext &C) const {
+  if (propagateFromPre(CE, C))
+    return;
+  addSourcesPost(CE, C);
+}
+
+void GenericTaintChecker::addSourcesPre(const CallExpr *CE,
+                                        CheckerContext &C) const {
+  ProgramStateRef State = 0;
+  const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+  if (!FDecl || FDecl->getKind() != Decl::Function)
+    return;
+
+  StringRef Name = C.getCalleeName(FDecl);
+  if (Name.empty())
+    return;
+
+  // First, try generating a propagation rule for this function.
+  TaintPropagationRule Rule =
+    TaintPropagationRule::getTaintPropagationRule(FDecl, Name, C);
+  if (!Rule.isNull()) {
+    State = Rule.process(CE, C);
+    if (!State)
+      return;
+    C.addTransition(State);
+    return;
+  }
+
+  // Otherwise, check if we have custom pre-processing implemented.
+  FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
+    .Case("fscanf", &GenericTaintChecker::preFscanf)
+    .Default(0);
+  // Check and evaluate the call.
+  if (evalFunction)
+    State = (this->*evalFunction)(CE, C);
+  if (!State)
+    return;
+  C.addTransition(State);
+
+}
+
+bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
+                                           CheckerContext &C) const {
+  ProgramStateRef State = C.getState();
+
+  // Depending on what was tainted at pre-visit, we determined a set of
+  // arguments which should be tainted after the function returns. These are
+  // stored in the state as TaintArgsOnPostVisit set.
+  TaintArgsOnPostVisitTy TaintArgs = State->get<TaintArgsOnPostVisit>();
+  if (TaintArgs.isEmpty())
+    return false;
+
+  for (llvm::ImmutableSet<unsigned>::iterator
+         I = TaintArgs.begin(), E = TaintArgs.end(); I != E; ++I) {
+    unsigned ArgNum  = *I;
+
+    // Special handling for the tainted return value.
+    if (ArgNum == ReturnValueIndex) {
+      State = State->addTaint(CE, C.getLocationContext());
+      continue;
+    }
+
+    // The arguments are pointer arguments. The data they are pointing at is
+    // tainted after the call.
+    if (CE->getNumArgs() < (ArgNum + 1))
+      return false;
+    const Expr* Arg = CE->getArg(ArgNum);
+    SymbolRef Sym = getPointedToSymbol(C, Arg);
+    if (Sym)
+      State = State->addTaint(Sym);
+  }
+
+  // Clear up the taint info from the state.
+  State = State->remove<TaintArgsOnPostVisit>();
+
+  if (State != C.getState()) {
+    C.addTransition(State);
+    return true;
+  }
+  return false;
+}
+
+void GenericTaintChecker::addSourcesPost(const CallExpr *CE,
+                                         CheckerContext &C) const {
+  // Define the attack surface.
+  // Set the evaluation function by switching on the callee name.
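+  // For example, the return value of getenv() is marked tainted via
+  // postRetTaint, and every output pointer argument of scanf() is tainted via
+  // postScanf (see the StringSwitch below).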
+  const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+  if (!FDecl || FDecl->getKind() != Decl::Function)
+    return;
+
+  StringRef Name = C.getCalleeName(FDecl);
+  if (Name.empty())
+    return;
+  FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
+    .Case("scanf", &GenericTaintChecker::postScanf)
+    // TODO: Add support for vfscanf & family.
+    .Case("getchar", &GenericTaintChecker::postRetTaint)
+    .Case("getchar_unlocked", &GenericTaintChecker::postRetTaint)
+    .Case("getenv", &GenericTaintChecker::postRetTaint)
+    .Case("fopen", &GenericTaintChecker::postRetTaint)
+    .Case("fdopen", &GenericTaintChecker::postRetTaint)
+    .Case("freopen", &GenericTaintChecker::postRetTaint)
+    .Case("getch", &GenericTaintChecker::postRetTaint)
+    .Case("wgetch", &GenericTaintChecker::postRetTaint)
+    .Case("socket", &GenericTaintChecker::postSocket)
+    .Default(0);
+
+  // If the callee isn't defined, it is not of security concern.
+  // Check and evaluate the call.
+  ProgramStateRef State = 0;
+  if (evalFunction)
+    State = (this->*evalFunction)(CE, C);
+  if (!State)
+    return;
+
+  C.addTransition(State);
+}
+
+bool GenericTaintChecker::checkPre(const CallExpr *CE, CheckerContext &C) const {
+
+  if (checkUncontrolledFormatString(CE, C))
+    return true;
+
+  const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+  if (!FDecl || FDecl->getKind() != Decl::Function)
+    return false;
+
+  StringRef Name = C.getCalleeName(FDecl);
+  if (Name.empty())
+    return false;
+
+  if (checkSystemCall(CE, Name, C))
+    return true;
+
+  if (checkTaintedBufferSize(CE, FDecl, C))
+    return true;
+
+  return false;
+}
+
+SymbolRef GenericTaintChecker::getPointedToSymbol(CheckerContext &C,
+                                                  const Expr* Arg) {
+  ProgramStateRef State = C.getState();
+  SVal AddrVal = State->getSVal(Arg->IgnoreParens(), C.getLocationContext());
+  if (AddrVal.isUnknownOrUndef())
+    return 0;
+
+  Optional<Loc> AddrLoc = AddrVal.getAs<Loc>();
+  if (!AddrLoc)
+    return 0;
+
+  const PointerType *ArgTy =
+    dyn_cast<PointerType>(Arg->getType().getCanonicalType().getTypePtr());
+  SVal Val = State->getSVal(*AddrLoc,
+                            ArgTy ? ArgTy->getPointeeType(): QualType());
+  return Val.getAsSymbol();
+}
+
+ProgramStateRef 
+GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
+                                                   CheckerContext &C) const {
+  ProgramStateRef State = C.getState();
+
+  // Check for taint in arguments.
+  bool IsTainted = false;
+  for (ArgVector::const_iterator I = SrcArgs.begin(),
+                                 E = SrcArgs.end(); I != E; ++I) {
+    unsigned ArgNum = *I;
+
+    if (ArgNum == InvalidArgIndex) {
+      // Check if any of the arguments is tainted, but skip the
+      // destination arguments.
+      for (unsigned int i = 0; i < CE->getNumArgs(); ++i) {
+        if (isDestinationArgument(i))
+          continue;
+        if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(i), State, C)))
+          break;
+      }
+      break;
+    }
+
+    if (CE->getNumArgs() < (ArgNum + 1))
+      return State;
+    if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(ArgNum), State, C)))
+      break;
+  }
+  if (!IsTainted)
+    return State;
+
+  // Mark the arguments which should be tainted after the function returns.
+  for (ArgVector::const_iterator I = DstArgs.begin(),
+                                 E = DstArgs.end(); I != E; ++I) {
+    unsigned ArgNum = *I;
+
+    // Should we mark all arguments as tainted?
+    if (ArgNum == InvalidArgIndex) {
+      // For all pointer and references that were passed in:
+      //   If they are not pointing to const data, mark data as tainted.
+      //   TODO: So far we are just going one level down; ideally we'd need to
+      //         recurse here.
+      for (unsigned int i = 0; i < CE->getNumArgs(); ++i) {
+        const Expr *Arg = CE->getArg(i);
+        // Process pointer argument.
+        const Type *ArgTy = Arg->getType().getTypePtr();
+        QualType PType = ArgTy->getPointeeType();
+        if ((!PType.isNull() && !PType.isConstQualified())
+            || (ArgTy->isReferenceType() && !Arg->getType().isConstQualified()))
+          State = State->add<TaintArgsOnPostVisit>(i);
+      }
+      continue;
+    }
+
+    // Should mark the return value?
+    if (ArgNum == ReturnValueIndex) {
+      State = State->add<TaintArgsOnPostVisit>(ReturnValueIndex);
+      continue;
+    }
+
+    // Mark the given argument.
+    assert(ArgNum < CE->getNumArgs());
+    State = State->add<TaintArgsOnPostVisit>(ArgNum);
+  }
+
+  return State;
+}
+
+
+// If argument 0 (file descriptor) is tainted, all arguments except for arg 0
+// and arg 1 should get taint.
+ProgramStateRef GenericTaintChecker::preFscanf(const CallExpr *CE,
+                                                   CheckerContext &C) const {
+  assert(CE->getNumArgs() >= 2);
+  ProgramStateRef State = C.getState();
+
+  // Check if the file descriptor is tainted.
+  if (State->isTainted(CE->getArg(0), C.getLocationContext()) ||
+      isStdin(CE->getArg(0), C)) {
+    // All arguments except for the first two should get taint.
+    for (unsigned int i = 2; i < CE->getNumArgs(); ++i)
+        State = State->add<TaintArgsOnPostVisit>(i);
+    return State;
+  }
+
+  return 0;
+}
+
+
+// If argument 0 (the protocol domain) is a network domain, the return value
+// should get taint.
+ProgramStateRef GenericTaintChecker::postSocket(const CallExpr *CE,
+                                                CheckerContext &C) const {
+  ProgramStateRef State = C.getState();
+  if (CE->getNumArgs() < 3)
+    return State;
+
+  SourceLocation DomLoc = CE->getArg(0)->getExprLoc();
+  StringRef DomName = C.getMacroNameOrSpelling(DomLoc);
+  // Whitelist the internal communication protocols.
+  if (DomName.equals("AF_SYSTEM") || DomName.equals("AF_LOCAL") ||
+      DomName.equals("AF_UNIX") || DomName.equals("AF_RESERVED_36"))
+    return State;
+  State = State->addTaint(CE, C.getLocationContext());
+  return State;
+}
+
+ProgramStateRef GenericTaintChecker::postScanf(const CallExpr *CE,
+                                                   CheckerContext &C) const {
+  ProgramStateRef State = C.getState();
+  if (CE->getNumArgs() < 2)
+    return State;
+
+  // All arguments except for the very first one should get taint.
+  for (unsigned int i = 1; i < CE->getNumArgs(); ++i) {
+    // The arguments are pointer arguments. The data they are pointing at is
+    // tainted after the call.
+    const Expr* Arg = CE->getArg(i);
+    SymbolRef Sym = getPointedToSymbol(C, Arg);
+    if (Sym)
+      State = State->addTaint(Sym);
+  }
+  return State;
+}
+
+ProgramStateRef GenericTaintChecker::postRetTaint(const CallExpr *CE,
+                                                  CheckerContext &C) const {
+  return C.getState()->addTaint(CE, C.getLocationContext());
+}
+
+bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
+  ProgramStateRef State = C.getState();
+  SVal Val = State->getSVal(E, C.getLocationContext());
+
+  // stdin is a pointer, so it would be a region.
+  const MemRegion *MemReg = Val.getAsRegion();
+
+  // The region should be symbolic; we do not know its value.
+  const SymbolicRegion *SymReg = dyn_cast_or_null<SymbolicRegion>(MemReg);
+  if (!SymReg)
+    return false;
+
+  // Get its symbol and find the declaration region it's pointing to.
+  const SymbolRegionValue *Sm =
+      dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
+  if (!Sm)
+    return false;
+  const DeclRegion *DeclReg = dyn_cast_or_null<DeclRegion>(Sm->getRegion());
+  if (!DeclReg)
+    return false;
+
+  // This region corresponds to a declaration, find out if it's a global/extern
+  // variable named stdin with the proper type.
+  if (const VarDecl *D = dyn_cast_or_null<VarDecl>(DeclReg->getDecl())) {
+    D = D->getCanonicalDecl();
+    if ((D->getName().find("stdin") != StringRef::npos) && D->isExternC())
+        if (const PointerType * PtrTy =
+              dyn_cast<PointerType>(D->getType().getTypePtr()))
+          if (PtrTy->getPointeeType() == C.getASTContext().getFILEType())
+            return true;
+  }
+  return false;
+}
+
+static bool getPrintfFormatArgumentNum(const CallExpr *CE,
+                                       const CheckerContext &C,
+                                       unsigned int &ArgNum) {
+  // Find if the function contains a format string argument.
+  // Handles: fprintf, printf, sprintf, snprintf, vfprintf, vprintf, vsprintf,
+  // vsnprintf, syslog, custom annotated functions.
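+  //
+  // For example (hypothetical declaration): given
+  //   void mylog(int pri, const char *fmt, ...)
+  //       __attribute__((format(printf, 2, 3)));
+  // getFormatIdx() returns 2, so ArgNum is set to 1, the 0-based index of fmt.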
+  const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+  if (!FDecl)
+    return false;
+  for (specific_attr_iterator<FormatAttr>
+         i = FDecl->specific_attr_begin<FormatAttr>(),
+         e = FDecl->specific_attr_end<FormatAttr>(); i != e ; ++i) {
+
+    const FormatAttr *Format = *i;
+    ArgNum = Format->getFormatIdx() - 1;
+    if ((Format->getType() == "printf") && CE->getNumArgs() > ArgNum)
+      return true;
+  }
+
+  // Or if a function is named setproctitle (this is a heuristic).
+  if (C.getCalleeName(CE).find("setproctitle") != StringRef::npos) {
+    ArgNum = 0;
+    return true;
+  }
+
+  return false;
+}
+
+bool GenericTaintChecker::generateReportIfTainted(const Expr *E,
+                                                  const char Msg[],
+                                                  CheckerContext &C) const {
+  assert(E);
+
+  // Check for taint.
+  ProgramStateRef State = C.getState();
+  if (!State->isTainted(getPointedToSymbol(C, E)) &&
+      !State->isTainted(E, C.getLocationContext()))
+    return false;
+
+  // Generate diagnostic.
+  if (ExplodedNode *N = C.addTransition()) {
+    initBugType();
+    BugReport *report = new BugReport(*BT, Msg, N);
+    report->addRange(E->getSourceRange());
+    C.emitReport(report);
+    return true;
+  }
+  return false;
+}
+
+bool GenericTaintChecker::checkUncontrolledFormatString(const CallExpr *CE,
+                                                        CheckerContext &C) const {
+  // Check if the function contains a format string argument.
+  unsigned int ArgNum = 0;
+  if (!getPrintfFormatArgumentNum(CE, C, ArgNum))
+    return false;
+
+  // If either the format string content or the pointer itself is tainted, warn.
+  if (generateReportIfTainted(CE->getArg(ArgNum),
+                              MsgUncontrolledFormatString, C))
+    return true;
+  return false;
+}
+
+bool GenericTaintChecker::checkSystemCall(const CallExpr *CE,
+                                          StringRef Name,
+                                          CheckerContext &C) const {
+  // TODO: It might make sense to run this check on demand. In some cases,
+  // we should check if the environment has been cleansed here. We also might
+  // need to know if the user was reset before these calls (seteuid).
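+  //
+  // For example (hypothetical snippet): after `char *Cmd = getenv("CMD");`
+  // the return value of getenv() is tainted (see addSourcesPost), so a later
+  // `system(Cmd);` is reported with MsgSanitizeSystemArgs because argument 0
+  // of system() is checked below.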
+  unsigned ArgNum = llvm::StringSwitch<unsigned>(Name)
+    .Case("system", 0)
+    .Case("popen", 0)
+    .Case("execl", 0)
+    .Case("execle", 0)
+    .Case("execlp", 0)
+    .Case("execv", 0)
+    .Case("execvp", 0)
+    .Case("execvP", 0)
+    .Case("execve", 0)
+    .Case("dlopen", 0)
+    .Default(UINT_MAX);
+
+  if (ArgNum == UINT_MAX || CE->getNumArgs() < (ArgNum + 1))
+    return false;
+
+  if (generateReportIfTainted(CE->getArg(ArgNum),
+                              MsgSanitizeSystemArgs, C))
+    return true;
+
+  return false;
+}
+
+// TODO: Should this check be a part of the CString checker?
+// If yes, should taint be a global setting?
+bool GenericTaintChecker::checkTaintedBufferSize(const CallExpr *CE,
+                                                 const FunctionDecl *FDecl,
+                                                 CheckerContext &C) const {
+  // If the function has a buffer size argument, set ArgNum.
+  unsigned ArgNum = InvalidArgIndex;
+  unsigned BId = 0;
+  if ( (BId = FDecl->getMemoryFunctionKind()) )
+    switch(BId) {
+    case Builtin::BImemcpy:
+    case Builtin::BImemmove:
+    case Builtin::BIstrncpy:
+      ArgNum = 2;
+      break;
+    case Builtin::BIstrndup:
+      ArgNum = 1;
+      break;
+    default:
+      break;
+    };
+
+  if (ArgNum == InvalidArgIndex) {
+    if (C.isCLibraryFunction(FDecl, "malloc") ||
+        C.isCLibraryFunction(FDecl, "calloc") ||
+        C.isCLibraryFunction(FDecl, "alloca"))
+      ArgNum = 0;
+    else if (C.isCLibraryFunction(FDecl, "memccpy"))
+      ArgNum = 3;
+    else if (C.isCLibraryFunction(FDecl, "realloc"))
+      ArgNum = 1;
+    else if (C.isCLibraryFunction(FDecl, "bcopy"))
+      ArgNum = 2;
+  }
+
+  if (ArgNum != InvalidArgIndex && CE->getNumArgs() > ArgNum &&
+      generateReportIfTainted(CE->getArg(ArgNum), MsgTaintedBufferSize, C))
+    return true;
+
+  return false;
+}
+
+void ento::registerGenericTaintChecker(CheckerManager &mgr) {
+  mgr.registerChecker<GenericTaintChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
new file mode 100644
index 0000000..271ba47
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
@@ -0,0 +1,748 @@
+//==- IdempotentOperationChecker.cpp - Idempotent Operations ----*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of path-sensitive checks for idempotent and/or
+// tautological operations. Each potential operation is checked along all paths
+// to see if every path results in a pointless operation.
+//                 +-------------------------------------------+
+//                 |Table of idempotent/tautological operations|
+//                 +-------------------------------------------+
+//+--------------------------------------------------------------------------+
+//|Operator | x op x | x op 1 | 1 op x | x op 0 | 0 op x | x op ~0 | ~0 op x |
+//+--------------------------------------------------------------------------+
+//  +, +=   |        |        |        |   x    |   x    |         |
+//  -, -=   |        |        |        |   x    |   -x   |         |
+//  *, *=   |        |   x    |   x    |   0    |   0    |         |
+//  /, /=   |   1    |   x    |        |  N/A   |   0    |         |
+//  &, &=   |   x    |        |        |   0    |   0    |   x     |    x
+//  |, |=   |   x    |        |        |   x    |   x    |   ~0    |    ~0
+//  ^, ^=   |   0    |        |        |   x    |   x    |         |
+//  <<, <<= |        |        |        |   x    |   0    |         |
+//  >>, >>= |        |        |        |   x    |   0    |         |
+//  ||      |   1    |   1    |   1    |   x    |   x    |   1     |    1
+//  &&      |   1    |   x    |   x    |   0    |   0    |   x     |    x
+//  =       |   x    |        |        |        |        |         |
+//  ==      |   1    |        |        |        |        |         |
+//  >=      |   1    |        |        |        |        |         |
+//  <=      |   1    |        |        |        |        |         |
+//  >       |   0    |        |        |        |        |         |
+//  <       |   0    |        |        |        |        |         |
+//  !=      |   0    |        |        |        |        |         |
+//===----------------------------------------------------------------------===//
+//
+// Things TODO:
+// - Improved error messages
+// - Handle mixed assumptions (which assumptions can belong together?)
+// - Finer grained false positive control (levels)
+// - Handling ~0 values
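+//
+// For example, a self-assignment such as `x = x;` (where x is also used
+// elsewhere, so the assignment is not just silencing an unused-variable
+// warning) is reported as "Assigned value is always the same as the existing
+// value", and a binary operator whose operands hold the same value on every
+// analyzed path is reported as "Both operands to '==' always have the same
+// value".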
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Stmt.h"
+#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
+#include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
+#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class IdempotentOperationChecker
+  : public Checker<check::PreStmt<BinaryOperator>,
+                     check::PostStmt<BinaryOperator>,
+                     check::EndAnalysis> {
+public:
+  void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+  void checkPostStmt(const BinaryOperator *B, CheckerContext &C) const;
+  void checkEndAnalysis(ExplodedGraph &G, BugReporter &B,ExprEngine &Eng) const;
+
+private:
+  // Our assumption about a particular operation.
+  enum Assumption { Possible = 0, Impossible, Equal, LHSis1, RHSis1, LHSis0,
+      RHSis0 };
+
+  static void UpdateAssumption(Assumption &A, const Assumption &New);
+
+  // False positive reduction methods
+  static bool isSelfAssign(const Expr *LHS, const Expr *RHS);
+  static bool isUnused(const Expr *E, AnalysisDeclContext *AC);
+  static bool isTruncationExtensionAssignment(const Expr *LHS,
+                                              const Expr *RHS);
+  static bool pathWasCompletelyAnalyzed(AnalysisDeclContext *AC,
+                                        const CFGBlock *CB,
+                                        const CoreEngine &CE);
+  static bool CanVary(const Expr *Ex,
+                      AnalysisDeclContext *AC);
+  static bool isConstantOrPseudoConstant(const DeclRefExpr *DR,
+                                         AnalysisDeclContext *AC);
+  static bool containsNonLocalVarDecl(const Stmt *S);
+
+  // Hash table and related data structures
+  struct BinaryOperatorData {
+    BinaryOperatorData() : assumption(Possible) {}
+
+    Assumption assumption;
+    ExplodedNodeSet explodedNodes; // Set of ExplodedNodes that refer to a
+                                   // BinaryOperator
+  };
+  typedef llvm::DenseMap<const BinaryOperator *, BinaryOperatorData>
+      AssumptionMap;
+  mutable AssumptionMap hash;
+  mutable OwningPtr<BugType> BT;
+};
+}
+
+void IdempotentOperationChecker::checkPreStmt(const BinaryOperator *B,
+                                              CheckerContext &C) const {
+  // Find or create an entry in the hash for this BinaryOperator instance.
+  // If we haven't done a lookup before, it will get default initialized to
+  // 'Possible'. At this stage we do not store the ExplodedNode, as it has not
+  // been created yet.
+  BinaryOperatorData &Data = hash[B];
+  Assumption &A = Data.assumption;
+  AnalysisDeclContext *AC = C.getCurrentAnalysisDeclContext();
+
+  // If we already have visited this node on a path that does not contain an
+  // idempotent operation, return immediately.
+  if (A == Impossible)
+    return;
+
+  // Retrieve both sides of the operator and determine if they can vary (which
+  // may mean this is a false positive).
+  const Expr *LHS = B->getLHS();
+  const Expr *RHS = B->getRHS();
+
+  // At this stage we can calculate whether each side contains a false positive
+  // that applies to all operators. We only need to calculate this the first
+  // time.
+  bool LHSContainsFalsePositive = false, RHSContainsFalsePositive = false;
+  if (A == Possible) {
+    // An expression contains a false positive if it can't vary, or if it
+    // contains a known false positive VarDecl.
+    LHSContainsFalsePositive = !CanVary(LHS, AC)
+        || containsNonLocalVarDecl(LHS);
+    RHSContainsFalsePositive = !CanVary(RHS, AC)
+        || containsNonLocalVarDecl(RHS);
+  }
+
+  ProgramStateRef state = C.getState();
+  const LocationContext *LCtx = C.getLocationContext();
+  SVal LHSVal = state->getSVal(LHS, LCtx);
+  SVal RHSVal = state->getSVal(RHS, LCtx);
+
+  // If either value is unknown, we can't be 100% sure of all paths.
+  if (LHSVal.isUnknownOrUndef() || RHSVal.isUnknownOrUndef()) {
+    A = Impossible;
+    return;
+  }
+  BinaryOperator::Opcode Op = B->getOpcode();
+
+  // Dereference the LHS SVal if this is an assign operation
+  switch (Op) {
+  default:
+    break;
+
+  // Fall through intentional
+  case BO_AddAssign:
+  case BO_SubAssign:
+  case BO_MulAssign:
+  case BO_DivAssign:
+  case BO_AndAssign:
+  case BO_OrAssign:
+  case BO_XorAssign:
+  case BO_ShlAssign:
+  case BO_ShrAssign:
+  case BO_Assign:
+  // Assign statements have one extra level of indirection
+    if (!LHSVal.getAs<Loc>()) {
+      A = Impossible;
+      return;
+    }
+    LHSVal = state->getSVal(LHSVal.castAs<Loc>(), LHS->getType());
+  }
+
+
+  // We now check for various cases which result in an idempotent operation.
+
+  // x op x
+  switch (Op) {
+  default:
+    break; // We don't care about any other operators.
+
+  // Fall through intentional
+  case BO_Assign:
+    // x Assign x can be used to silence unused variable warnings intentionally.
+    // If this is a self assignment and the variable is referenced elsewhere,
+    // and the assignment is not a truncation or extension, then it is a false
+    // positive.
+    if (isSelfAssign(LHS, RHS)) {
+      if (!isUnused(LHS, AC) && !isTruncationExtensionAssignment(LHS, RHS)) {
+        UpdateAssumption(A, Equal);
+        return;
+      }
+      else {
+        A = Impossible;
+        return;
+      }
+    }
+
+  case BO_SubAssign:
+  case BO_DivAssign:
+  case BO_AndAssign:
+  case BO_OrAssign:
+  case BO_XorAssign:
+  case BO_Sub:
+  case BO_Div:
+  case BO_And:
+  case BO_Or:
+  case BO_Xor:
+  case BO_LOr:
+  case BO_LAnd:
+  case BO_EQ:
+  case BO_NE:
+    if (LHSVal != RHSVal || LHSContainsFalsePositive
+        || RHSContainsFalsePositive)
+      break;
+    UpdateAssumption(A, Equal);
+    return;
+  }
+
+  // x op 1
+  switch (Op) {
+   default:
+     break; // We don't care about any other operators.
+
+   // Fall through intentional
+   case BO_MulAssign:
+   case BO_DivAssign:
+   case BO_Mul:
+   case BO_Div:
+   case BO_LOr:
+   case BO_LAnd:
+     if (!RHSVal.isConstant(1) || RHSContainsFalsePositive)
+       break;
+     UpdateAssumption(A, RHSis1);
+     return;
+  }
+
+  // 1 op x
+  switch (Op) {
+  default:
+    break; // We don't care about any other operators.
+
+  // Fall through intentional
+  case BO_MulAssign:
+  case BO_Mul:
+  case BO_LOr:
+  case BO_LAnd:
+    if (!LHSVal.isConstant(1) || LHSContainsFalsePositive)
+      break;
+    UpdateAssumption(A, LHSis1);
+    return;
+  }
+
+  // x op 0
+  switch (Op) {
+  default:
+    break; // We don't care about any other operators.
+
+  // Fall through intentional
+  case BO_AddAssign:
+  case BO_SubAssign:
+  case BO_MulAssign:
+  case BO_AndAssign:
+  case BO_OrAssign:
+  case BO_XorAssign:
+  case BO_Add:
+  case BO_Sub:
+  case BO_Mul:
+  case BO_And:
+  case BO_Or:
+  case BO_Xor:
+  case BO_Shl:
+  case BO_Shr:
+  case BO_LOr:
+  case BO_LAnd:
+    if (!RHSVal.isConstant(0) || RHSContainsFalsePositive)
+      break;
+    UpdateAssumption(A, RHSis0);
+    return;
+  }
+
+  // 0 op x
+  switch (Op) {
+  default:
+    break; // We don't care about any other operators.
+
+  // Fall through intentional
+  //case BO_AddAssign: // Common false positive
+  case BO_SubAssign: // Check only if unsigned
+  case BO_MulAssign:
+  case BO_DivAssign:
+  case BO_AndAssign:
+  //case BO_OrAssign: // Common false positive
+  //case BO_XorAssign: // Common false positive
+  case BO_ShlAssign:
+  case BO_ShrAssign:
+  case BO_Add:
+  case BO_Sub:
+  case BO_Mul:
+  case BO_Div:
+  case BO_And:
+  case BO_Or:
+  case BO_Xor:
+  case BO_Shl:
+  case BO_Shr:
+  case BO_LOr:
+  case BO_LAnd:
+    if (!LHSVal.isConstant(0) || LHSContainsFalsePositive)
+      break;
+    UpdateAssumption(A, LHSis0);
+    return;
+  }
+
+  // If we get to this point, there has been a valid use of this operation.
+  A = Impossible;
+}
+
+// At the post visit stage, the predecessor ExplodedNode will be the
+// BinaryOperator that was just created. We use this hook to collect the
+// ExplodedNode.
+void IdempotentOperationChecker::checkPostStmt(const BinaryOperator *B,
+                                               CheckerContext &C) const {
+  // Add the ExplodedNode we just visited
+  BinaryOperatorData &Data = hash[B];
+
+  const Stmt *predStmt =
+      C.getPredecessor()->getLocation().castAs<StmtPoint>().getStmt();
+
+  // Ignore implicit calls to setters.
+  if (!isa<BinaryOperator>(predStmt))
+    return;
+
+  Data.explodedNodes.Add(C.getPredecessor());
+}
+
+void IdempotentOperationChecker::checkEndAnalysis(ExplodedGraph &G,
+                                                  BugReporter &BR,
+                                                  ExprEngine &Eng) const {
+  if (!BT)
+    BT.reset(new BugType("Idempotent operation", "Dead code"));
+
+  // Iterate over the hash to see if we have any paths with definite
+  // idempotent operations.
+  for (AssumptionMap::const_iterator i = hash.begin(); i != hash.end(); ++i) {
+    // Unpack the hash contents
+    const BinaryOperatorData &Data = i->second;
+    const Assumption &A = Data.assumption;
+    const ExplodedNodeSet &ES = Data.explodedNodes;
+
+    // If there are no nodes associated with the expression, nothing to report.
+    // FIXME: This is possible because the checker does part of processing in
+    // checkPreStmt and part in checkPostStmt.
+    if (ES.begin() == ES.end())
+      continue;
+
+    const BinaryOperator *B = i->first;
+
+    if (A == Impossible)
+      continue;
+
+    // If the analyzer did not finish, check to see if we can still emit this
+    // warning.
+    if (Eng.hasWorkRemaining()) {
+      // Only report if the path through this block was completely analyzed.
+      AnalysisDeclContext *AC = (*ES.begin())->getLocationContext()
+                                         ->getAnalysisDeclContext();
+      if (!pathWasCompletelyAnalyzed(AC,
+                                     AC->getCFGStmtMap()->getBlock(B),
+                                     Eng.getCoreEngine()))
+        continue;
+    }
+
+    // Select the error message and SourceRanges to report.
+    SmallString<128> buf;
+    llvm::raw_svector_ostream os(buf);
+    bool LHSRelevant = false, RHSRelevant = false;
+    switch (A) {
+    case Equal:
+      LHSRelevant = true;
+      RHSRelevant = true;
+      if (B->getOpcode() == BO_Assign)
+        os << "Assigned value is always the same as the existing value";
+      else
+        os << "Both operands to '" << B->getOpcodeStr()
+           << "' always have the same value";
+      break;
+    case LHSis1:
+      LHSRelevant = true;
+      os << "The left operand to '" << B->getOpcodeStr() << "' is always 1";
+      break;
+    case RHSis1:
+      RHSRelevant = true;
+      os << "The right operand to '" << B->getOpcodeStr() << "' is always 1";
+      break;
+    case LHSis0:
+      LHSRelevant = true;
+      os << "The left operand to '" << B->getOpcodeStr() << "' is always 0";
+      break;
+    case RHSis0:
+      RHSRelevant = true;
+      os << "The right operand to '" << B->getOpcodeStr() << "' is always 0";
+      break;
+    case Possible:
+      llvm_unreachable("Operation was never marked with an assumption");
+    case Impossible:
+      llvm_unreachable(0);
+    }
+
+    // Add a report for each ExplodedNode
+    for (ExplodedNodeSet::iterator I = ES.begin(), E = ES.end(); I != E; ++I) {
+      BugReport *report = new BugReport(*BT, os.str(), *I);
+
+      // Add source ranges and visitor hooks
+      if (LHSRelevant) {
+        const Expr *LHS = i->first->getLHS();
+        report->addRange(LHS->getSourceRange());
+        FindLastStoreBRVisitor::registerStatementVarDecls(*report, LHS, false);
+      }
+      if (RHSRelevant) {
+        const Expr *RHS = i->first->getRHS();
+        report->addRange(i->first->getRHS()->getSourceRange());
+        FindLastStoreBRVisitor::registerStatementVarDecls(*report, RHS, false);
+      }
+
+      BR.emitReport(report);
+    }
+  }
+
+  hash.clear();
+}
+
+// Updates the current assumption given the new assumption
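+// For example, if one path sets the assumption to RHSis0 and another path
+// later reports Equal for the same operator, the assumption collapses to
+// Impossible and no warning is emitted for that operator.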
+inline void IdempotentOperationChecker::UpdateAssumption(Assumption &A,
+                                                        const Assumption &New) {
+  // If the assumption is the same, there is nothing to do.
+  if (A == New)
+    return;
+
+  switch (A) {
+  // If we don't currently have an assumption, set it
+  case Possible:
+    A = New;
+    return;
+
+  // If we have determined that a valid state happened, ignore the new
+  // assumption.
+  case Impossible:
+    return;
+
+  // Any other case means that we had a different assumption last time. We don't
+  // currently support mixing assumptions for diagnostic reasons, so we set
+  // our assumption to be impossible.
+  default:
+    A = Impossible;
+    return;
+  }
+}
+
+// Check for a statement where a variable is self assigned to possibly avoid an
+// unused variable warning.
+bool IdempotentOperationChecker::isSelfAssign(const Expr *LHS, const Expr *RHS) {
+  LHS = LHS->IgnoreParenCasts();
+  RHS = RHS->IgnoreParenCasts();
+
+  const DeclRefExpr *LHS_DR = dyn_cast<DeclRefExpr>(LHS);
+  if (!LHS_DR)
+    return false;
+
+  const VarDecl *VD = dyn_cast<VarDecl>(LHS_DR->getDecl());
+  if (!VD)
+    return false;
+
+  const DeclRefExpr *RHS_DR = dyn_cast<DeclRefExpr>(RHS);
+  if (!RHS_DR)
+    return false;
+
+  if (VD != RHS_DR->getDecl())
+    return false;
+
+  return true;
+}
+
+// Returns true if the Expr points to a VarDecl that is not read anywhere
+// outside of self-assignments.
+bool IdempotentOperationChecker::isUnused(const Expr *E,
+                                          AnalysisDeclContext *AC) {
+  if (!E)
+    return false;
+
+  const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts());
+  if (!DR)
+    return false;
+
+  const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
+  if (!VD)
+    return false;
+
+  if (AC->getPseudoConstantAnalysis()->wasReferenced(VD))
+    return false;
+
+  return true;
+}
+
+// Check for self casts truncating/extending a variable
+bool IdempotentOperationChecker::isTruncationExtensionAssignment(
+                                                              const Expr *LHS,
+                                                              const Expr *RHS) {
+
+  const DeclRefExpr *LHS_DR = dyn_cast<DeclRefExpr>(LHS->IgnoreParenCasts());
+  if (!LHS_DR)
+    return false;
+
+  const VarDecl *VD = dyn_cast<VarDecl>(LHS_DR->getDecl());
+  if (!VD)
+    return false;
+
+  const DeclRefExpr *RHS_DR = dyn_cast<DeclRefExpr>(RHS->IgnoreParenCasts());
+  if (!RHS_DR)
+    return false;
+
+  if (VD != RHS_DR->getDecl())
+     return false;
+
+  return dyn_cast<DeclRefExpr>(RHS->IgnoreParenLValueCasts()) == NULL;
+}
+
+// Returns false if a path to this block was not completely analyzed, or true
+// otherwise.
+bool
+IdempotentOperationChecker::pathWasCompletelyAnalyzed(AnalysisDeclContext *AC,
+                                                      const CFGBlock *CB,
+                                                      const CoreEngine &CE) {
+
+  CFGReverseBlockReachabilityAnalysis *CRA = AC->getCFGReachablityAnalysis();
+  
+  // Test for reachability from any aborted blocks to this block
+  typedef CoreEngine::BlocksExhausted::const_iterator ExhaustedIterator;
+  for (ExhaustedIterator I = CE.blocks_exhausted_begin(),
+      E = CE.blocks_exhausted_end(); I != E; ++I) {
+    const BlockEdge &BE =  I->first;
+
+    // The destination block on the BlockEdge is the first block that was not
+    // analyzed. If we can reach this block from the aborted block, then this
+    // block was not completely analyzed.
+    //
+    // Also explicitly check if the current block is the destination block.
+    // While technically reachable, it means we aborted the analysis on
+    // a path that included that block.
+    const CFGBlock *destBlock = BE.getDst();
+    if (destBlock == CB || CRA->isReachable(destBlock, CB))
+      return false;
+  }
+
+  // Test for reachability from blocks we just gave up on.
+  typedef CoreEngine::BlocksAborted::const_iterator AbortedIterator;
+  for (AbortedIterator I = CE.blocks_aborted_begin(),
+       E = CE.blocks_aborted_end(); I != E; ++I) {
+    const CFGBlock *destBlock = I->first;
+    if (destBlock == CB || CRA->isReachable(destBlock, CB))
+      return false;
+  }
+  
+  // For the items still on the worklist, see if they are in blocks that
+  // can eventually reach 'CB'.
+  class VisitWL : public WorkList::Visitor {
+    const CFGStmtMap *CBM;
+    const CFGBlock *TargetBlock;
+    CFGReverseBlockReachabilityAnalysis &CRA;
+  public:
+    VisitWL(const CFGStmtMap *cbm, const CFGBlock *targetBlock,
+            CFGReverseBlockReachabilityAnalysis &cra)
+      : CBM(cbm), TargetBlock(targetBlock), CRA(cra) {}
+    virtual bool visit(const WorkListUnit &U) {
+      ProgramPoint P = U.getNode()->getLocation();
+      const CFGBlock *B = 0;
+      if (Optional<StmtPoint> SP = P.getAs<StmtPoint>()) {
+        B = CBM->getBlock(SP->getStmt());
+      } else if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
+        B = BE->getDst();
+      } else if (Optional<BlockEntrance> BEnt = P.getAs<BlockEntrance>()) {
+        B = BEnt->getBlock();
+      } else if (Optional<BlockExit> BExit = P.getAs<BlockExit>()) {
+        B = BExit->getBlock();
+      }
+      if (!B)
+        return true;
+      
+      return B == TargetBlock || CRA.isReachable(B, TargetBlock);
+    }
+  };
+  VisitWL visitWL(AC->getCFGStmtMap(), CB, *CRA);
+  // Were there any items in the worklist that could potentially reach
+  // this block?
+  if (CE.getWorkList()->visitItemsInWorkList(visitWL))
+    return false;
+
+  // Verify that this block is reachable from the entry block
+  if (!CRA->isReachable(&AC->getCFG()->getEntry(), CB))
+    return false;
+
+  // If we get to this point, the block is reachable from the entry block and
+  // has no connection to an aborted block, so the path was completely
+  // analyzed and we can report the error.
+  return true;
+}
+
+// Recursive function that determines whether an expression can vary. An
+// expression may be fixed because it reduces to a compile-time constant (like
+// sizeof) or because it only involves variables that behave like constants.
+// The function returns true if the expression can vary, and false otherwise.
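+// For example, `sizeof(int)` cannot vary, while sizeof applied to a
+// variable-length array type can (see the UnaryExprOrTypeTraitExpr case
+// below).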
+bool IdempotentOperationChecker::CanVary(const Expr *Ex,
+                                         AnalysisDeclContext *AC) {
+  // Parentheses and casts are irrelevant here
+  Ex = Ex->IgnoreParenCasts();
+
+  if (Ex->getLocStart().isMacroID())
+    return false;
+
+  switch (Ex->getStmtClass()) {
+  // Trivially true cases
+  case Stmt::ArraySubscriptExprClass:
+  case Stmt::MemberExprClass:
+  case Stmt::StmtExprClass:
+  case Stmt::CallExprClass:
+  case Stmt::VAArgExprClass:
+  case Stmt::ShuffleVectorExprClass:
+    return true;
+  default:
+    return true;
+
+  // Trivially false cases
+  case Stmt::IntegerLiteralClass:
+  case Stmt::CharacterLiteralClass:
+  case Stmt::FloatingLiteralClass:
+  case Stmt::PredefinedExprClass:
+  case Stmt::ImaginaryLiteralClass:
+  case Stmt::StringLiteralClass:
+  case Stmt::OffsetOfExprClass:
+  case Stmt::CompoundLiteralExprClass:
+  case Stmt::AddrLabelExprClass:
+  case Stmt::BinaryTypeTraitExprClass:
+  case Stmt::GNUNullExprClass:
+  case Stmt::InitListExprClass:
+  case Stmt::DesignatedInitExprClass:
+  case Stmt::BlockExprClass:
+    return false;
+
+  // Cases requiring custom logic
+  case Stmt::UnaryExprOrTypeTraitExprClass: {
+    const UnaryExprOrTypeTraitExpr *SE = 
+                       cast<const UnaryExprOrTypeTraitExpr>(Ex);
+    if (SE->getKind() != UETT_SizeOf)
+      return false;
+    return SE->getTypeOfArgument()->isVariableArrayType();
+  }
+  case Stmt::DeclRefExprClass:
+    // Check for constants/pseudoconstants
+    return !isConstantOrPseudoConstant(cast<DeclRefExpr>(Ex), AC);
+
+  // The next cases require recursion for subexpressions
+  case Stmt::BinaryOperatorClass: {
+    const BinaryOperator *B = cast<const BinaryOperator>(Ex);
+
+    // Exclude cases involving pointer arithmetic.  These are usually
+    // false positives.
+    if (B->getOpcode() == BO_Sub || B->getOpcode() == BO_Add)
+      if (B->getLHS()->getType()->getAs<PointerType>())
+        return false;
+
+    return CanVary(B->getRHS(), AC)
+        || CanVary(B->getLHS(), AC);
+   }
+  case Stmt::UnaryOperatorClass: {
+    const UnaryOperator *U = cast<const UnaryOperator>(Ex);
+    // Handle trivial case first
+    switch (U->getOpcode()) {
+    case UO_Extension:
+      return false;
+    default:
+      return CanVary(U->getSubExpr(), AC);
+    }
+  }
+  case Stmt::ChooseExprClass:
+    return CanVary(cast<const ChooseExpr>(Ex)->getChosenSubExpr(
+        AC->getASTContext()), AC);
+  case Stmt::ConditionalOperatorClass:
+  case Stmt::BinaryConditionalOperatorClass:
+    return CanVary(cast<AbstractConditionalOperator>(Ex)->getCond(), AC);
+  }
+}
+
+// Returns true if a DeclRefExpr is or behaves like a constant.
+bool IdempotentOperationChecker::isConstantOrPseudoConstant(
+                                                          const DeclRefExpr *DR,
+                                                          AnalysisDeclContext *AC) {
+  // Check if the type of the Decl is const-qualified
+  if (DR->getType().isConstQualified())
+    return true;
+
+  // Check for an enum
+  if (isa<EnumConstantDecl>(DR->getDecl()))
+    return true;
+
+  const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
+  if (!VD)
+    return true;
+
+  // Check if the Decl behaves like a constant. This check also takes care of
+  // static variables, which can only change between function calls if they are
+  // modified in the AST.
+  PseudoConstantAnalysis *PCA = AC->getPseudoConstantAnalysis();
+  if (PCA->isPseudoConstant(VD))
+    return true;
+
+  return false;
+}
+
+// Recursively find any substatements containing VarDecls with storage other
+// than local.
+bool IdempotentOperationChecker::containsNonLocalVarDecl(const Stmt *S) {
+  const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
+
+  if (DR)
+    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl()))
+      if (!VD->hasLocalStorage())
+        return true;
+
+  for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+      ++I)
+    if (const Stmt *child = *I)
+      if (containsNonLocalVarDecl(child))
+        return true;
+
+  return false;
+}
+
+
+void ento::registerIdempotentOperationChecker(CheckerManager &mgr) {
+  mgr.registerChecker<IdempotentOperationChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
new file mode 100644
index 0000000..e35557f
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
@@ -0,0 +1,22 @@
+//==--- InterCheckerAPI.h ---------------------------------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file allows introduction of checker dependencies. It contains APIs for
+// inter-checker communications.
+//===----------------------------------------------------------------------===//
+
+#ifndef INTERCHECKERAPI_H_
+#define INTERCHECKERAPI_H_
+namespace clang {
+namespace ento {
+
+/// Register the checker which evaluates CString API calls.
+void registerCStringCheckerBasic(CheckerManager &Mgr);
+
+}}
+#endif /* INTERCHECKERAPI_H_ */
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
new file mode 100644
index 0000000..cc940be
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
@@ -0,0 +1,760 @@
+//=- IvarInvalidationChecker.cpp - -*- C++ -------------------------------*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This checker implements annotation-driven invalidation checking. If a class
+//  contains a method annotated with 'objc_instance_variable_invalidator',
+//  - (void) foo
+//           __attribute__((annotate("objc_instance_variable_invalidator")));
+//  all the "invalidatable" instance variables of this class should be
+//  invalidated. We call an instance variable invalidatable if it is an object
+//  of a class which contains an invalidation method. There can be multiple
+//  methods annotated with this annotation per class; any one of them can be
+//  used to invalidate the ivar. An ivar or property is considered invalidated
+//  if it is assigned 'nil' or an invalidation method has been called on it.
+//  An invalidation method should either invalidate all the ivars or call
+//  another invalidation method (on self).
+//
+//  The partial-invalidator annotation addresses cases where ivars are
+//  invalidated by other methods, which might or might not be called from
+//  the invalidation method. The checker verifies that each invalidation
+//  method, together with all the partial methods, cumulatively invalidates
+//  all ivars.
+//    __attribute__((annotate("objc_instance_variable_invalidator_partial")));
+//
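+//  For illustration (hypothetical class and method names):
+//    @interface Foo : NSObject
+//    - (void)invalidate
+//        __attribute__((annotate("objc_instance_variable_invalidator")));
+//    @end
+//    @implementation Foo { Bar *_bar; }  // Bar declares an invalidation method
+//    - (void)invalidate { [_bar invalidate]; _bar = nil; }
+//    @end
+//  Here -invalidate satisfies the checker because it invalidates the only
+//  invalidatable ivar, _bar.
+//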
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+struct ChecksFilter {
+  /// Check for missing invalidation method declarations.
+  DefaultBool check_MissingInvalidationMethod;
+  /// Check that all ivars are invalidated.
+  DefaultBool check_InstanceVariableInvalidation;
+};
+
+class IvarInvalidationCheckerImpl {
+
+  typedef llvm::SmallSetVector<const ObjCMethodDecl*, 2> MethodSet;
+  typedef llvm::DenseMap<const ObjCMethodDecl*,
+                         const ObjCIvarDecl*> MethToIvarMapTy;
+  typedef llvm::DenseMap<const ObjCPropertyDecl*,
+                         const ObjCIvarDecl*> PropToIvarMapTy;
+  typedef llvm::DenseMap<const ObjCIvarDecl*,
+                         const ObjCPropertyDecl*> IvarToPropMapTy;
+
+
+  struct InvalidationInfo {
+    /// Has the ivar been invalidated?
+    bool IsInvalidated;
+
+    /// The methods which can be used to invalidate the ivar.
+    MethodSet InvalidationMethods;
+
+    InvalidationInfo() : IsInvalidated(false) {}
+    void addInvalidationMethod(const ObjCMethodDecl *MD) {
+      InvalidationMethods.insert(MD);
+    }
+
+    bool needsInvalidation() const {
+      return !InvalidationMethods.empty();
+    }
+
+    bool hasMethod(const ObjCMethodDecl *MD) {
+      if (IsInvalidated)
+        return true;
+      for (MethodSet::iterator I = InvalidationMethods.begin(),
+          E = InvalidationMethods.end(); I != E; ++I) {
+        if (*I == MD) {
+          IsInvalidated = true;
+          return true;
+        }
+      }
+      return false;
+    }
+  };
+
+  typedef llvm::DenseMap<const ObjCIvarDecl*, InvalidationInfo> IvarSet;
+
+  /// Statement visitor, which walks the method body and flags the ivars
+  /// referenced in it (either directly or via property).
+  class MethodCrawler : public ConstStmtVisitor<MethodCrawler> {
+    /// The set of Ivars which need to be invalidated.
+    IvarSet &IVars;
+
+    /// Flag is set as the result of a message send to another
+    /// invalidation method.
+    bool &CalledAnotherInvalidationMethod;
+
+    /// Property setter to ivar mapping.
+    const MethToIvarMapTy &PropertySetterToIvarMap;
+
+    /// Property getter to ivar mapping.
+    const MethToIvarMapTy &PropertyGetterToIvarMap;
+
+    /// Property to ivar mapping.
+    const PropToIvarMapTy &PropertyToIvarMap;
+
+    /// The invalidation method being currently processed.
+    const ObjCMethodDecl *InvalidationMethod;
+
+    ASTContext &Ctx;
+
+    /// Peel off parens, casts, OpaqueValueExpr, and PseudoObjectExpr.
+    const Expr *peel(const Expr *E) const;
+
+    /// Does this expression represent zero: '0'?
+    bool isZero(const Expr *E) const;
+
+    /// Mark the given ivar as invalidated.
+    void markInvalidated(const ObjCIvarDecl *Iv);
+
+    /// Checks if IvarRef refers to the tracked IVar, if yes, marks it as
+    /// invalidated.
+    void checkObjCIvarRefExpr(const ObjCIvarRefExpr *IvarRef);
+
+    /// Checks if ObjCPropertyRefExpr refers to the tracked IVar, if yes, marks
+    /// it as invalidated.
+    void checkObjCPropertyRefExpr(const ObjCPropertyRefExpr *PA);
+
+    /// Checks if ObjCMessageExpr refers to (is a getter for) the tracked IVar,
+    /// if yes, marks it as invalidated.
+    void checkObjCMessageExpr(const ObjCMessageExpr *ME);
+
+    /// Checks if the Expr refers to an ivar, if yes, marks it as invalidated.
+    void check(const Expr *E);
+
+  public:
+    MethodCrawler(IvarSet &InIVars,
+                  bool &InCalledAnotherInvalidationMethod,
+                  const MethToIvarMapTy &InPropertySetterToIvarMap,
+                  const MethToIvarMapTy &InPropertyGetterToIvarMap,
+                  const PropToIvarMapTy &InPropertyToIvarMap,
+                  ASTContext &InCtx)
+    : IVars(InIVars),
+      CalledAnotherInvalidationMethod(InCalledAnotherInvalidationMethod),
+      PropertySetterToIvarMap(InPropertySetterToIvarMap),
+      PropertyGetterToIvarMap(InPropertyGetterToIvarMap),
+      PropertyToIvarMap(InPropertyToIvarMap),
+      InvalidationMethod(0),
+      Ctx(InCtx) {}
+
+    void VisitStmt(const Stmt *S) { VisitChildren(S); }
+
+    void VisitBinaryOperator(const BinaryOperator *BO);
+
+    void VisitObjCMessageExpr(const ObjCMessageExpr *ME);
+
+    void VisitChildren(const Stmt *S) {
+      for (Stmt::const_child_range I = S->children(); I; ++I) {
+        if (*I)
+          this->Visit(*I);
+        if (CalledAnotherInvalidationMethod)
+          return;
+      }
+    }
+  };
+
+  /// Check if any of the methods inside the interface are annotated with
+  /// the invalidation annotation and update the InvalidationInfo accordingly.
+  /// \param LookForPartial is set when we are searching for partial
+  ///        invalidators.
+  static void containsInvalidationMethod(const ObjCContainerDecl *D,
+                                         InvalidationInfo &Out,
+                                         bool LookForPartial);
+
+  /// Check if ivar should be tracked and add to TrackedIvars if positive.
+  /// Returns true if ivar should be tracked.
+  static bool trackIvar(const ObjCIvarDecl *Iv, IvarSet &TrackedIvars,
+                        const ObjCIvarDecl **FirstIvarDecl);
+
+  /// Given the property declaration, and the list of tracked ivars, finds
+  /// the ivar backing the property when possible. Returns '0' when no such
+  /// ivar could be found.
+  static const ObjCIvarDecl *findPropertyBackingIvar(
+      const ObjCPropertyDecl *Prop,
+      const ObjCInterfaceDecl *InterfaceD,
+      IvarSet &TrackedIvars,
+      const ObjCIvarDecl **FirstIvarDecl);
+
+  /// Print ivar name or the property if the given ivar backs a property.
+  static void printIvar(llvm::raw_svector_ostream &os,
+                        const ObjCIvarDecl *IvarDecl,
+                        const IvarToPropMapTy &IvarToPopertyMap);
+
+  void reportNoInvalidationMethod(const ObjCIvarDecl *FirstIvarDecl,
+                                  const IvarToPropMapTy &IvarToPopertyMap,
+                                  const ObjCInterfaceDecl *InterfaceD,
+                                  bool MissingDeclaration) const;
+  void reportIvarNeedsInvalidation(const ObjCIvarDecl *IvarD,
+                                   const IvarToPropMapTy &IvarToPopertyMap,
+                                   const ObjCMethodDecl *MethodD) const;
+
+  AnalysisManager& Mgr;
+  BugReporter &BR;
+  /// Filter on the checks performed.
+  const ChecksFilter &Filter;
+
+public:
+  IvarInvalidationCheckerImpl(AnalysisManager& InMgr,
+                              BugReporter &InBR,
+                              const ChecksFilter &InFilter) :
+    Mgr (InMgr), BR(InBR), Filter(InFilter) {}
+
+  void visit(const ObjCImplementationDecl *D) const;
+};
+
+static bool isInvalidationMethod(const ObjCMethodDecl *M, bool LookForPartial) {
+  for (specific_attr_iterator<AnnotateAttr>
+       AI = M->specific_attr_begin<AnnotateAttr>(),
+       AE = M->specific_attr_end<AnnotateAttr>(); AI != AE; ++AI) {
+    const AnnotateAttr *Ann = *AI;
+    if (!LookForPartial &&
+        Ann->getAnnotation() == "objc_instance_variable_invalidator")
+      return true;
+    if (LookForPartial &&
+        Ann->getAnnotation() == "objc_instance_variable_invalidator_partial")
+      return true;
+  }
+  return false;
+}
+
+void IvarInvalidationCheckerImpl::containsInvalidationMethod(
+    const ObjCContainerDecl *D, InvalidationInfo &OutInfo, bool Partial) {
+
+  if (!D)
+    return;
+
+  assert(!isa<ObjCImplementationDecl>(D));
+  // TODO: Cache the results.
+
+  // Check all methods.
+  for (ObjCContainerDecl::method_iterator
+      I = D->meth_begin(),
+      E = D->meth_end(); I != E; ++I) {
+      const ObjCMethodDecl *MDI = *I;
+      if (isInvalidationMethod(MDI, Partial))
+        OutInfo.addInvalidationMethod(
+                               cast<ObjCMethodDecl>(MDI->getCanonicalDecl()));
+  }
+
+  // If interface, check all parent protocols and super.
+  if (const ObjCInterfaceDecl *InterfD = dyn_cast<ObjCInterfaceDecl>(D)) {
+
+    // Visit all protocols.
+    for (ObjCInterfaceDecl::protocol_iterator
+        I = InterfD->protocol_begin(),
+        E = InterfD->protocol_end(); I != E; ++I) {
+      containsInvalidationMethod((*I)->getDefinition(), OutInfo, Partial);
+    }
+
+    // Visit all categories in case the invalidation method is declared in
+    // a category.
+    for (ObjCInterfaceDecl::visible_extensions_iterator
+           Ext = InterfD->visible_extensions_begin(),
+           ExtEnd = InterfD->visible_extensions_end();
+         Ext != ExtEnd; ++Ext) {
+      containsInvalidationMethod(*Ext, OutInfo, Partial);
+    }
+
+    containsInvalidationMethod(InterfD->getSuperClass(), OutInfo, Partial);
+    return;
+  }
+
+  // If protocol, check all parent protocols.
+  if (const ObjCProtocolDecl *ProtD = dyn_cast<ObjCProtocolDecl>(D)) {
+    for (ObjCInterfaceDecl::protocol_iterator
+        I = ProtD->protocol_begin(),
+        E = ProtD->protocol_end(); I != E; ++I) {
+      containsInvalidationMethod((*I)->getDefinition(), OutInfo, Partial);
+    }
+    return;
+  }
+
+  return;
+}
+
+bool IvarInvalidationCheckerImpl::trackIvar(const ObjCIvarDecl *Iv,
+                                        IvarSet &TrackedIvars,
+                                        const ObjCIvarDecl **FirstIvarDecl) {
+  QualType IvQTy = Iv->getType();
+  const ObjCObjectPointerType *IvTy = IvQTy->getAs<ObjCObjectPointerType>();
+  if (!IvTy)
+    return false;
+  const ObjCInterfaceDecl *IvInterf = IvTy->getInterfaceDecl();
+
+  InvalidationInfo Info;
+  containsInvalidationMethod(IvInterf, Info, /*LookForPartial*/ false);
+  if (Info.needsInvalidation()) {
+    const ObjCIvarDecl *I = cast<ObjCIvarDecl>(Iv->getCanonicalDecl());
+    TrackedIvars[I] = Info;
+    if (!*FirstIvarDecl)
+      *FirstIvarDecl = I;
+    return true;
+  }
+  return false;
+}
+
+const ObjCIvarDecl *IvarInvalidationCheckerImpl::findPropertyBackingIvar(
+                        const ObjCPropertyDecl *Prop,
+                        const ObjCInterfaceDecl *InterfaceD,
+                        IvarSet &TrackedIvars,
+                        const ObjCIvarDecl **FirstIvarDecl) {
+  const ObjCIvarDecl *IvarD = 0;
+
+  // Lookup for the synthesized case.
+  IvarD = Prop->getPropertyIvarDecl();
+  // We only track the ivars/properties that are defined in the current 
+  // class (not the parent).
+  if (IvarD && IvarD->getContainingInterface() == InterfaceD) {
+    if (TrackedIvars.count(IvarD)) {
+      return IvarD;
+    }
+    // If the ivar is synthesized we still want to track it.
+    if (trackIvar(IvarD, TrackedIvars, FirstIvarDecl))
+      return IvarD;
+  }
+
+  // Lookup IVars named "_PropName" or "PropName" among the tracked Ivars.
+  StringRef PropName = Prop->getIdentifier()->getName();
+  for (IvarSet::const_iterator I = TrackedIvars.begin(),
+                               E = TrackedIvars.end(); I != E; ++I) {
+    const ObjCIvarDecl *Iv = I->first;
+    StringRef IvarName = Iv->getName();
+
+    if (IvarName == PropName)
+      return Iv;
+
+    SmallString<128> PropNameWithUnderscore;
+    {
+      llvm::raw_svector_ostream os(PropNameWithUnderscore);
+      os << '_' << PropName;
+    }
+    if (IvarName == PropNameWithUnderscore.str())
+      return Iv;
+  }
+
+  // Note, this is a possible source of false positives. We could look at the
+  // getter implementation to find the ivar when its name is not derived from
+  // the property name.
+  return 0;
+}
+
+void IvarInvalidationCheckerImpl::printIvar(llvm::raw_svector_ostream &os,
+                                      const ObjCIvarDecl *IvarDecl,
+                                      const IvarToPropMapTy &IvarToPopertyMap) {
+  if (IvarDecl->getSynthesize()) {
+    const ObjCPropertyDecl *PD = IvarToPopertyMap.lookup(IvarDecl);
+    assert(PD &&"Do we synthesize ivars for something other than properties?");
+    os << "Property " << PD->getName() << " ";
+  } else {
+    os << "Instance variable " << IvarDecl->getName() << " ";
+  }
+}
+
+// Check that the invalidatable interfaces with ivars/properties implement the
+// invalidation methods.
+void IvarInvalidationCheckerImpl::
+visit(const ObjCImplementationDecl *ImplD) const {
+  // Collect all ivars that need cleanup.
+  IvarSet Ivars;
+  // Record the first ivar needing invalidation; used in reporting when only
+  // one ivar is sufficient. We cannot simply grab the first element of the
+  // Ivars set, as that would not give deterministic output.
+  const ObjCIvarDecl *FirstIvarDecl = 0;
+  const ObjCInterfaceDecl *InterfaceD = ImplD->getClassInterface();
+
+  // Collect ivars declared in this class, its extensions, and its
+  // implementation.
+  ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(InterfaceD);
+  for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
+       Iv= Iv->getNextIvar())
+    trackIvar(Iv, Ivars, &FirstIvarDecl);
+
+  // Construct Property/Property Accessor to Ivar maps to assist checking if an
+  // ivar which is backing a property has been reset.
+  MethToIvarMapTy PropSetterToIvarMap;
+  MethToIvarMapTy PropGetterToIvarMap;
+  PropToIvarMapTy PropertyToIvarMap;
+  IvarToPropMapTy IvarToPopertyMap;
+
+  ObjCInterfaceDecl::PropertyMap PropMap;
+  ObjCInterfaceDecl::PropertyDeclOrder PropOrder;
+  InterfaceD->collectPropertiesToImplement(PropMap, PropOrder);
+
+  for (ObjCInterfaceDecl::PropertyMap::iterator
+      I = PropMap.begin(), E = PropMap.end(); I != E; ++I) {
+    const ObjCPropertyDecl *PD = I->second;
+
+    const ObjCIvarDecl *ID = findPropertyBackingIvar(PD, InterfaceD, Ivars,
+                                                     &FirstIvarDecl);
+    if (!ID)
+      continue;
+
+    // Store the mappings.
+    PD = cast<ObjCPropertyDecl>(PD->getCanonicalDecl());
+    PropertyToIvarMap[PD] = ID;
+    IvarToPopertyMap[ID] = PD;
+
+    // Find the setter and the getter.
+    const ObjCMethodDecl *SetterD = PD->getSetterMethodDecl();
+    if (SetterD) {
+      SetterD = cast<ObjCMethodDecl>(SetterD->getCanonicalDecl());
+      PropSetterToIvarMap[SetterD] = ID;
+    }
+
+    const ObjCMethodDecl *GetterD = PD->getGetterMethodDecl();
+    if (GetterD) {
+      GetterD = cast<ObjCMethodDecl>(GetterD->getCanonicalDecl());
+      PropGetterToIvarMap[GetterD] = ID;
+    }
+  }
+
+  // If no ivars need invalidation, there is nothing to check here.
+  if (Ivars.empty())
+    return;
+
+  // Find all partial invalidation methods.
+  InvalidationInfo PartialInfo;
+  containsInvalidationMethod(InterfaceD, PartialInfo, /*LookForPartial*/ true);
+
+  // Remove ivars invalidated by the partial invalidation methods. They do not
+  // need to be invalidated in the regular invalidation methods.
+  bool AtImplementationContainsAtLeastOnePartialInvalidationMethod = false;
+  for (MethodSet::iterator
+      I = PartialInfo.InvalidationMethods.begin(),
+      E = PartialInfo.InvalidationMethods.end(); I != E; ++I) {
+    const ObjCMethodDecl *InterfD = *I;
+
+    // Get the corresponding method in the @implementation.
+    const ObjCMethodDecl *D = ImplD->getMethod(InterfD->getSelector(),
+                                               InterfD->isInstanceMethod());
+    if (D && D->hasBody()) {
+      AtImplementationContainsAtLeastOnePartialInvalidationMethod = true;
+
+      bool CalledAnotherInvalidationMethod = false;
+      // The MethodCrawler is going to remove the invalidated ivars.
+      MethodCrawler(Ivars,
+                    CalledAnotherInvalidationMethod,
+                    PropSetterToIvarMap,
+                    PropGetterToIvarMap,
+                    PropertyToIvarMap,
+                    BR.getContext()).VisitStmt(D->getBody());
+      // If another invalidation method was called, trust that full invalidation
+      // has occurred.
+      if (CalledAnotherInvalidationMethod)
+        Ivars.clear();
+    }
+  }
+
+  // If all ivars have been invalidated by partial invalidators, there is
+  // nothing to check here.
+  if (Ivars.empty())
+    return;
+
+  // Find all invalidation methods in this @interface declaration and parents.
+  InvalidationInfo Info;
+  containsInvalidationMethod(InterfaceD, Info, /*LookForPartial*/ false);
+
+  // Report an error in case none of the invalidation methods are declared.
+  if (!Info.needsInvalidation() && !PartialInfo.needsInvalidation()) {
+    if (Filter.check_MissingInvalidationMethod)
+      reportNoInvalidationMethod(FirstIvarDecl, IvarToPopertyMap, InterfaceD,
+                                 /*MissingDeclaration*/ true);
+    // If there are no invalidation methods, there is no ivar validation work
+    // to be done.
+    return;
+  }
+
+  // Only check if Ivars are invalidated when InstanceVariableInvalidation
+  // has been requested.
+  if (!Filter.check_InstanceVariableInvalidation)
+    return;
+
+  // Check that all ivars are invalidated by the invalidation methods.
+  bool AtImplementationContainsAtLeastOneInvalidationMethod = false;
+  for (MethodSet::iterator I = Info.InvalidationMethods.begin(),
+                           E = Info.InvalidationMethods.end(); I != E; ++I) {
+    const ObjCMethodDecl *InterfD = *I;
+
+    // Get the corresponding method in the @implementation.
+    const ObjCMethodDecl *D = ImplD->getMethod(InterfD->getSelector(),
+                                               InterfD->isInstanceMethod());
+    if (D && D->hasBody()) {
+      AtImplementationContainsAtLeastOneInvalidationMethod = true;
+
+      // Get a copy of ivars needing invalidation.
+      IvarSet IvarsI = Ivars;
+
+      bool CalledAnotherInvalidationMethod = false;
+      MethodCrawler(IvarsI,
+                    CalledAnotherInvalidationMethod,
+                    PropSetterToIvarMap,
+                    PropGetterToIvarMap,
+                    PropertyToIvarMap,
+                    BR.getContext()).VisitStmt(D->getBody());
+      // If another invalidation method was called, trust that full invalidation
+      // has occurred.
+      if (CalledAnotherInvalidationMethod)
+        continue;
+
+      // Warn on the ivars that were not invalidated by the method.
+      for (IvarSet::const_iterator
+          I = IvarsI.begin(), E = IvarsI.end(); I != E; ++I)
+        reportIvarNeedsInvalidation(I->first, IvarToPopertyMap, D);
+    }
+  }
+
+  // Report an error in case none of the invalidation methods are implemented.
+  if (!AtImplementationContainsAtLeastOneInvalidationMethod) {
+    if (AtImplementationContainsAtLeastOnePartialInvalidationMethod) {
+      // Warn on the ivars that were not invalidated by the partial
+      // invalidation methods.
+      for (IvarSet::const_iterator
+           I = Ivars.begin(), E = Ivars.end(); I != E; ++I)
+        reportIvarNeedsInvalidation(I->first, IvarToPopertyMap, 0);
+    } else {
+      // Otherwise, no invalidation methods were implemented.
+      reportNoInvalidationMethod(FirstIvarDecl, IvarToPopertyMap, InterfaceD,
+                                 /*MissingDeclaration*/ false);
+    }
+  }
+}
+
+void IvarInvalidationCheckerImpl::
+reportNoInvalidationMethod(const ObjCIvarDecl *FirstIvarDecl,
+                           const IvarToPropMapTy &IvarToPopertyMap,
+                           const ObjCInterfaceDecl *InterfaceD,
+                           bool MissingDeclaration) const {
+  SmallString<128> sbuf;
+  llvm::raw_svector_ostream os(sbuf);
+  assert(FirstIvarDecl);
+  printIvar(os, FirstIvarDecl, IvarToPopertyMap);
+  os << "needs to be invalidated; ";
+  if (MissingDeclaration)
+    os << "no invalidation method is declared for ";
+  else
+    os << "no invalidation method is defined in the @implementation for ";
+  os << InterfaceD->getName();
+
+  PathDiagnosticLocation IvarDecLocation =
+    PathDiagnosticLocation::createBegin(FirstIvarDecl, BR.getSourceManager());
+
+  BR.EmitBasicReport(FirstIvarDecl, "Incomplete invalidation",
+                     categories::CoreFoundationObjectiveC, os.str(),
+                     IvarDecLocation);
+}
+
+void IvarInvalidationCheckerImpl::
+reportIvarNeedsInvalidation(const ObjCIvarDecl *IvarD,
+                            const IvarToPropMapTy &IvarToPopertyMap,
+                            const ObjCMethodDecl *MethodD) const {
+  SmallString<128> sbuf;
+  llvm::raw_svector_ostream os(sbuf);
+  printIvar(os, IvarD, IvarToPopertyMap);
+  os << "needs to be invalidated or set to nil";
+  if (MethodD) {
+    PathDiagnosticLocation MethodDecLocation =
+                           PathDiagnosticLocation::createEnd(MethodD->getBody(),
+                           BR.getSourceManager(),
+                           Mgr.getAnalysisDeclContext(MethodD));
+    BR.EmitBasicReport(MethodD, "Incomplete invalidation",
+                       categories::CoreFoundationObjectiveC, os.str(),
+                       MethodDecLocation);
+  } else {
+    BR.EmitBasicReport(IvarD, "Incomplete invalidation",
+                       categories::CoreFoundationObjectiveC, os.str(),
+                       PathDiagnosticLocation::createBegin(IvarD,
+                                                        BR.getSourceManager()));
+  }
+}
+
+void IvarInvalidationCheckerImpl::MethodCrawler::markInvalidated(
+    const ObjCIvarDecl *Iv) {
+  IvarSet::iterator I = IVars.find(Iv);
+  if (I != IVars.end()) {
+    // If InvalidationMethod is present, we are processing a message send and
+    // should ensure we are invalidating with the appropriate method;
+    // otherwise, we are processing an assignment to 'nil'.
+    if (!InvalidationMethod ||
+        (InvalidationMethod && I->second.hasMethod(InvalidationMethod)))
+      IVars.erase(I);
+  }
+}
+
+const Expr *IvarInvalidationCheckerImpl::MethodCrawler::peel(const Expr *E) const {
+  E = E->IgnoreParenCasts();
+  if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E))
+    E = POE->getSyntacticForm()->IgnoreParenCasts();
+  if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E))
+    E = OVE->getSourceExpr()->IgnoreParenCasts();
+  return E;
+}
+
+void IvarInvalidationCheckerImpl::MethodCrawler::checkObjCIvarRefExpr(
+    const ObjCIvarRefExpr *IvarRef) {
+  if (const Decl *D = IvarRef->getDecl())
+    markInvalidated(cast<ObjCIvarDecl>(D->getCanonicalDecl()));
+}
+
+void IvarInvalidationCheckerImpl::MethodCrawler::checkObjCMessageExpr(
+    const ObjCMessageExpr *ME) {
+  const ObjCMethodDecl *MD = ME->getMethodDecl();
+  if (MD) {
+    MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
+    MethToIvarMapTy::const_iterator IvI = PropertyGetterToIvarMap.find(MD);
+    if (IvI != PropertyGetterToIvarMap.end())
+      markInvalidated(IvI->second);
+  }
+}
+
+void IvarInvalidationCheckerImpl::MethodCrawler::checkObjCPropertyRefExpr(
+    const ObjCPropertyRefExpr *PA) {
+
+  if (PA->isExplicitProperty()) {
+    const ObjCPropertyDecl *PD = PA->getExplicitProperty();
+    if (PD) {
+      PD = cast<ObjCPropertyDecl>(PD->getCanonicalDecl());
+      PropToIvarMapTy::const_iterator IvI = PropertyToIvarMap.find(PD);
+      if (IvI != PropertyToIvarMap.end())
+        markInvalidated(IvI->second);
+      return;
+    }
+  }
+
+  if (PA->isImplicitProperty()) {
+    const ObjCMethodDecl *MD = PA->getImplicitPropertySetter();
+    if (MD) {
+      MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
+      MethToIvarMapTy::const_iterator IvI = PropertyGetterToIvarMap.find(MD);
+      if (IvI != PropertyGetterToIvarMap.end())
+        markInvalidated(IvI->second);
+      return;
+    }
+  }
+}
+
+bool IvarInvalidationCheckerImpl::MethodCrawler::isZero(const Expr *E) const {
+  E = peel(E);
+
+  return (E->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull)
+           != Expr::NPCK_NotNull);
+}
+
+void IvarInvalidationCheckerImpl::MethodCrawler::check(const Expr *E) {
+  E = peel(E);
+
+  if (const ObjCIvarRefExpr *IvarRef = dyn_cast<ObjCIvarRefExpr>(E)) {
+    checkObjCIvarRefExpr(IvarRef);
+    return;
+  }
+
+  if (const ObjCPropertyRefExpr *PropRef = dyn_cast<ObjCPropertyRefExpr>(E)) {
+    checkObjCPropertyRefExpr(PropRef);
+    return;
+  }
+
+  if (const ObjCMessageExpr *MsgExpr = dyn_cast<ObjCMessageExpr>(E)) {
+    checkObjCMessageExpr(MsgExpr);
+    return;
+  }
+}
+
+void IvarInvalidationCheckerImpl::MethodCrawler::VisitBinaryOperator(
+    const BinaryOperator *BO) {
+  VisitStmt(BO);
+
+  // Do we assign/compare against zero? If yes, check the variable we are
+  // assigning to.
+  BinaryOperatorKind Opcode = BO->getOpcode();
+  if (Opcode != BO_Assign &&
+      Opcode != BO_EQ &&
+      Opcode != BO_NE)
+    return;
+
+  if (isZero(BO->getRHS())) {
+    check(BO->getLHS());
+    return;
+  }
+
+  if (Opcode != BO_Assign && isZero(BO->getLHS())) {
+    check(BO->getRHS());
+    return;
+  }
+}
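+// Illustrative note (not from the original source): with the logic above,
+// hypothetical statements such as '_ivar = nil;' or 'if (_ivar == nil) ...'
+// both count as evidence that '_ivar' has been invalidated.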
+
+void IvarInvalidationCheckerImpl::MethodCrawler::VisitObjCMessageExpr(
+  const ObjCMessageExpr *ME) {
+  const ObjCMethodDecl *MD = ME->getMethodDecl();
+  const Expr *Receiver = ME->getInstanceReceiver();
+
+  // Stop if we are calling '[self invalidate]'.
+  if (Receiver && isInvalidationMethod(MD, /*LookForPartial*/ false))
+    if (Receiver->isObjCSelfExpr()) {
+      CalledAnotherInvalidationMethod = true;
+      return;
+    }
+
+  // Check if we call a setter and set the property to 'nil'.
+  if (MD && (ME->getNumArgs() == 1) && isZero(ME->getArg(0))) {
+    MD = cast<ObjCMethodDecl>(MD->getCanonicalDecl());
+    MethToIvarMapTy::const_iterator IvI = PropertySetterToIvarMap.find(MD);
+    if (IvI != PropertySetterToIvarMap.end()) {
+      markInvalidated(IvI->second);
+      return;
+    }
+  }
+
+  // Check if we call the 'invalidation' routine on the ivar.
+  if (Receiver) {
+    InvalidationMethod = MD;
+    check(Receiver->IgnoreParenCasts());
+    InvalidationMethod = 0;
+  }
+
+  VisitStmt(ME);
+}
+}
+
+// Register the checkers.
+namespace {
+
+class IvarInvalidationChecker :
+  public Checker<check::ASTDecl<ObjCImplementationDecl> > {
+public:
+  ChecksFilter Filter;
+public:
+  void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& Mgr,
+                    BugReporter &BR) const {
+    IvarInvalidationCheckerImpl Walker(Mgr, BR, Filter);
+    Walker.visit(D);
+  }
+};
+}
+
+#define REGISTER_CHECKER(name) \
+void ento::register##name(CheckerManager &mgr) {\
+  mgr.registerChecker<IvarInvalidationChecker>()->Filter.check_##name = true;\
+}
+
+REGISTER_CHECKER(InstanceVariableInvalidation)
+REGISTER_CHECKER(MissingInvalidationMethod)
+
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
new file mode 100644
index 0000000..02a7cc3
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -0,0 +1,315 @@
+//=== LLVMConventionsChecker.cpp - Check LLVM codebase conventions ---*- C++ -*-
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines LLVMConventionsChecker, a collection of small checks for
+// specific coding conventions in the LLVM/Clang codebase.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Generic type checking routines.
+//===----------------------------------------------------------------------===//
+
+static bool IsLLVMStringRef(QualType T) {
+  const RecordType *RT = T->getAs<RecordType>();
+  if (!RT)
+    return false;
+
+  return StringRef(QualType(RT, 0).getAsString()) ==
+          "class StringRef";
+}
+
+/// Check whether the declaration is semantically inside the top-level
+/// namespace named by NS.
+static bool InNamespace(const Decl *D, StringRef NS) {
+  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D->getDeclContext());
+  if (!ND)
+    return false;
+  const IdentifierInfo *II = ND->getIdentifier();
+  if (!II || !II->getName().equals(NS))
+    return false;
+  return isa<TranslationUnitDecl>(ND->getDeclContext());
+}
+
+static bool IsStdString(QualType T) {
+  if (const ElaboratedType *QT = T->getAs<ElaboratedType>())
+    T = QT->getNamedType();
+
+  const TypedefType *TT = T->getAs<TypedefType>();
+  if (!TT)
+    return false;
+
+  const TypedefNameDecl *TD = TT->getDecl();
+
+  if (!InNamespace(TD, "std"))
+    return false;
+
+  return TD->getName() == "string";
+}
+
+static bool IsClangType(const RecordDecl *RD) {
+  return RD->getName() == "Type" && InNamespace(RD, "clang");
+}
+
+static bool IsClangDecl(const RecordDecl *RD) {
+  return RD->getName() == "Decl" && InNamespace(RD, "clang");
+}
+
+static bool IsClangStmt(const RecordDecl *RD) {
+  return RD->getName() == "Stmt" && InNamespace(RD, "clang");
+}
+
+static bool IsClangAttr(const RecordDecl *RD) {
+  return RD->getName() == "Attr" && InNamespace(RD, "clang");
+}
+
+static bool IsStdVector(QualType T) {
+  const TemplateSpecializationType *TS = T->getAs<TemplateSpecializationType>();
+  if (!TS)
+    return false;
+
+  TemplateName TM = TS->getTemplateName();
+  TemplateDecl *TD = TM.getAsTemplateDecl();
+
+  if (!TD || !InNamespace(TD, "std"))
+    return false;
+
+  return TD->getName() == "vector";
+}
+
+static bool IsSmallVector(QualType T) {
+  const TemplateSpecializationType *TS = T->getAs<TemplateSpecializationType>();
+  if (!TS)
+    return false;
+
+  TemplateName TM = TS->getTemplateName();
+  TemplateDecl *TD = TM.getAsTemplateDecl();
+
+  if (!TD || !InNamespace(TD, "llvm"))
+    return false;
+
+  return TD->getName() == "SmallVector";
+}
+
+//===----------------------------------------------------------------------===//
+// CHECK: a StringRef should not be bound to a temporary std::string whose
+// lifetime is shorter than the StringRef's.
+//===----------------------------------------------------------------------===//
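+//
+// Illustrative example of the flagged pattern (hypothetical code, not taken
+// from this checker's sources):
+//   std::string getName();
+//   StringRef Name = getName(); // binds to a temporary std::string that is
+//                               // destroyed while Name is still in use
+//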
+
+namespace {
+class StringRefCheckerVisitor : public StmtVisitor<StringRefCheckerVisitor> {
+  BugReporter &BR;
+  const Decl *DeclWithIssue;
+public:
+  StringRefCheckerVisitor(const Decl *declWithIssue, BugReporter &br)
+    : BR(br), DeclWithIssue(declWithIssue) {}
+  void VisitChildren(Stmt *S) {
+    for (Stmt::child_iterator I = S->child_begin(), E = S->child_end() ;
+      I != E; ++I)
+      if (Stmt *child = *I)
+        Visit(child);
+  }
+  void VisitStmt(Stmt *S) { VisitChildren(S); }
+  void VisitDeclStmt(DeclStmt *DS);
+private:
+  void VisitVarDecl(VarDecl *VD);
+};
+} // end anonymous namespace
+
+static void CheckStringRefAssignedTemporary(const Decl *D, BugReporter &BR) {
+  StringRefCheckerVisitor walker(D, BR);
+  walker.Visit(D->getBody());
+}
+
+void StringRefCheckerVisitor::VisitDeclStmt(DeclStmt *S) {
+  VisitChildren(S);
+
+  for (DeclStmt::decl_iterator I = S->decl_begin(), E = S->decl_end();I!=E; ++I)
+    if (VarDecl *VD = dyn_cast<VarDecl>(*I))
+      VisitVarDecl(VD);
+}
+
+void StringRefCheckerVisitor::VisitVarDecl(VarDecl *VD) {
+  Expr *Init = VD->getInit();
+  if (!Init)
+    return;
+
+  // Pattern match for:
+  // StringRef x = call() (where call returns std::string)
+  if (!IsLLVMStringRef(VD->getType()))
+    return;
+  ExprWithCleanups *Ex1 = dyn_cast<ExprWithCleanups>(Init);
+  if (!Ex1)
+    return;
+  CXXConstructExpr *Ex2 = dyn_cast<CXXConstructExpr>(Ex1->getSubExpr());
+  if (!Ex2 || Ex2->getNumArgs() != 1)
+    return;
+  ImplicitCastExpr *Ex3 = dyn_cast<ImplicitCastExpr>(Ex2->getArg(0));
+  if (!Ex3)
+    return;
+  CXXConstructExpr *Ex4 = dyn_cast<CXXConstructExpr>(Ex3->getSubExpr());
+  if (!Ex4 || Ex4->getNumArgs() != 1)
+    return;
+  ImplicitCastExpr *Ex5 = dyn_cast<ImplicitCastExpr>(Ex4->getArg(0));
+  if (!Ex5)
+    return;
+  CXXBindTemporaryExpr *Ex6 = dyn_cast<CXXBindTemporaryExpr>(Ex5->getSubExpr());
+  if (!Ex6 || !IsStdString(Ex6->getType()))
+    return;
+
+  // Okay, badness!  Report an error.
+  const char *desc = "StringRef should not be bound to temporary "
+                     "std::string that it outlives";
+  PathDiagnosticLocation VDLoc =
+    PathDiagnosticLocation::createBegin(VD, BR.getSourceManager());
+  BR.EmitBasicReport(DeclWithIssue, desc, "LLVM Conventions", desc,
+                     VDLoc, Init->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
+// CHECK: Clang AST nodes should not have fields that can allocate
+//   memory.
+//===----------------------------------------------------------------------===//
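+//
+// Illustrative example (hypothetical): a field such as
+//   std::vector<Stmt *> Children;
+// inside a class deriving from clang::Stmt would be reported by this check.
+//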
+
+static bool AllocatesMemory(QualType T) {
+  return IsStdVector(T) || IsStdString(T) || IsSmallVector(T);
+}
+
+// This type checking could be sped up via dynamic programming.
+static bool IsPartOfAST(const CXXRecordDecl *R) {
+  if (IsClangStmt(R) || IsClangType(R) || IsClangDecl(R) || IsClangAttr(R))
+    return true;
+
+  for (CXXRecordDecl::base_class_const_iterator I = R->bases_begin(),
+                                                E = R->bases_end(); I!=E; ++I) {
+    CXXBaseSpecifier BS = *I;
+    QualType T = BS.getType();
+    if (const RecordType *baseT = T->getAs<RecordType>()) {
+      CXXRecordDecl *baseD = cast<CXXRecordDecl>(baseT->getDecl());
+      if (IsPartOfAST(baseD))
+        return true;
+    }
+  }
+
+  return false;
+}
+
+namespace {
+class ASTFieldVisitor {
+  SmallVector<FieldDecl*, 10> FieldChain;
+  const CXXRecordDecl *Root;
+  BugReporter &BR;
+public:
+  ASTFieldVisitor(const CXXRecordDecl *root, BugReporter &br)
+    : Root(root), BR(br) {}
+
+  void Visit(FieldDecl *D);
+  void ReportError(QualType T);
+};
+} // end anonymous namespace
+
+static void CheckASTMemory(const CXXRecordDecl *R, BugReporter &BR) {
+  if (!IsPartOfAST(R))
+    return;
+
+  for (RecordDecl::field_iterator I = R->field_begin(), E = R->field_end();
+       I != E; ++I) {
+    ASTFieldVisitor walker(R, BR);
+    walker.Visit(*I);
+  }
+}
+
+void ASTFieldVisitor::Visit(FieldDecl *D) {
+  FieldChain.push_back(D);
+
+  QualType T = D->getType();
+
+  if (AllocatesMemory(T))
+    ReportError(T);
+
+  if (const RecordType *RT = T->getAs<RecordType>()) {
+    const RecordDecl *RD = RT->getDecl()->getDefinition();
+    for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+         I != E; ++I)
+      Visit(*I);
+  }
+
+  FieldChain.pop_back();
+}
+
+void ASTFieldVisitor::ReportError(QualType T) {
+  SmallString<1024> buf;
+  llvm::raw_svector_ostream os(buf);
+
+  os << "AST class '" << Root->getName() << "' has a field '"
+     << FieldChain.front()->getName() << "' that allocates heap memory";
+  if (FieldChain.size() > 1) {
+    os << " via the following chain: ";
+    bool isFirst = true;
+    for (SmallVectorImpl<FieldDecl*>::iterator I=FieldChain.begin(),
+         E=FieldChain.end(); I!=E; ++I) {
+      if (!isFirst)
+        os << '.';
+      else
+        isFirst = false;
+      os << (*I)->getName();
+    }
+  }
+  os << " (type " << FieldChain.back()->getType().getAsString() << ")";
+  os.flush();
+
+  // Note that this will fire for every translation unit that uses this
+  // class.  This is suboptimal, but at least scan-build will merge
+  // duplicate HTML reports.  In the future we need a unified way of merging
+  // duplicate reports across translation units.  For C++ classes we cannot
+  // just report warnings when we see an out-of-line method definition for a
+  // class, as that heuristic doesn't always work (the complete definition of
+  // the class may be in the header file, for example).
+  PathDiagnosticLocation L = PathDiagnosticLocation::createBegin(
+                               FieldChain.front(), BR.getSourceManager());
+  BR.EmitBasicReport(Root, "AST node allocates heap memory", "LLVM Conventions",
+                     os.str(), L);
+}
+
+//===----------------------------------------------------------------------===//
+// LLVMConventionsChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class LLVMConventionsChecker : public Checker<
+                                                check::ASTDecl<CXXRecordDecl>,
+                                                check::ASTCodeBody > {
+public:
+  void checkASTDecl(const CXXRecordDecl *R, AnalysisManager& mgr,
+                    BugReporter &BR) const {
+    if (R->isCompleteDefinition())
+      CheckASTMemory(R, BR);
+  }
+
+  void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+                        BugReporter &BR) const {
+    CheckStringRefAssignedTemporary(D, BR);
+  }
+};
+}
+
+void ento::registerLLVMConventionsChecker(CheckerManager &mgr) {
+  mgr.registerChecker<LLVMConventionsChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
new file mode 100644
index 0000000..f1f06c7
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -0,0 +1,623 @@
+//==--- MacOSKeychainAPIChecker.cpp ------------------------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This checker flags misuses of the Keychain API. In particular, the password
+// data allocated/returned by the SecKeychainItemCopyContent,
+// SecKeychainFindGenericPassword, and SecKeychainFindInternetPassword
+// functions has to be freed using a call to SecKeychainItemFreeContent.
+//===----------------------------------------------------------------------===//
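+//
+// Illustrative misuse (hypothetical code, variable names are invented):
+//   void *data; UInt32 length;
+//   SecKeychainItemCopyContent(item, NULL, NULL, &length, &data);
+//   free(data);  // flagged: SecKeychainItemFreeContent must be used instead;
+//                // never releasing 'data' at all is reported as a leak
+//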
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class MacOSKeychainAPIChecker : public Checker<check::PreStmt<CallExpr>,
+                                               check::PostStmt<CallExpr>,
+                                               check::DeadSymbols> {
+  mutable OwningPtr<BugType> BT;
+
+public:
+  /// AllocationState is part of the checker-specific state, together with the
+  /// MemRegion corresponding to the allocated data.
+  struct AllocationState {
+    /// The index of the allocator function.
+    unsigned int AllocatorIdx;
+    SymbolRef Region;
+
+    AllocationState(const Expr *E, unsigned int Idx, SymbolRef R) :
+      AllocatorIdx(Idx),
+      Region(R) {}
+
+    bool operator==(const AllocationState &X) const {
+      return (AllocatorIdx == X.AllocatorIdx &&
+              Region == X.Region);
+    }
+
+    void Profile(llvm::FoldingSetNodeID &ID) const {
+      ID.AddInteger(AllocatorIdx);
+      ID.AddPointer(Region);
+    }
+  };
+
+  void checkPreStmt(const CallExpr *S, CheckerContext &C) const;
+  void checkPostStmt(const CallExpr *S, CheckerContext &C) const;
+  void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+
+private:
+  typedef std::pair<SymbolRef, const AllocationState*> AllocationPair;
+  typedef SmallVector<AllocationPair, 2> AllocationPairVec;
+
+  enum APIKind {
+    /// Denotes functions tracked by this checker.
+    ValidAPI = 0,
+    /// The functions commonly/mistakenly used in place of the given API.
+    ErrorAPI = 1,
+    /// The functions which may allocate the data. These are tracked to reduce
+    /// the false alarm rate.
+    PossibleAPI = 2
+  };
+  /// Stores the information about the allocator and deallocator functions -
+  /// these are the functions the checker is tracking.
+  struct ADFunctionInfo {
+    const char* Name;
+    unsigned int Param;
+    unsigned int DeallocatorIdx;
+    APIKind Kind;
+  };
+  static const unsigned InvalidIdx = 100000;
+  static const unsigned FunctionsToTrackSize = 8;
+  static const ADFunctionInfo FunctionsToTrack[FunctionsToTrackSize];
+  /// The value that represents a no-error return for allocator functions.
+  static const unsigned NoErr = 0;
+
+  /// Given the function name, returns the index of the allocator/deallocator
+  /// function.
+  static unsigned getTrackedFunctionIndex(StringRef Name, bool IsAllocator);
+
+  inline void initBugType() const {
+    if (!BT)
+      BT.reset(new BugType("Improper use of SecKeychain API",
+                           "API Misuse (Apple)"));
+  }
+
+  void generateDeallocatorMismatchReport(const AllocationPair &AP,
+                                         const Expr *ArgExpr,
+                                         CheckerContext &C) const;
+
+  /// Find the allocation site for Sym on the path leading to the node N.
+  const ExplodedNode *getAllocationNode(const ExplodedNode *N, SymbolRef Sym,
+                                        CheckerContext &C) const;
+
+  BugReport *generateAllocatedDataNotReleasedReport(const AllocationPair &AP,
+                                                    ExplodedNode *N,
+                                                    CheckerContext &C) const;
+
+  /// Check if RetSym evaluates to an error value in the current state.
+  bool definitelyReturnedError(SymbolRef RetSym,
+                               ProgramStateRef State,
+                               SValBuilder &Builder,
+                               bool noError = false) const;
+
+  /// Check if RetSym evaluates to a NoErr value in the current state.
+  bool definitelyDidnotReturnError(SymbolRef RetSym,
+                                   ProgramStateRef State,
+                                   SValBuilder &Builder) const {
+    return definitelyReturnedError(RetSym, State, Builder, true);
+  }
+
+  /// Mark an AllocationPair interesting for diagnostic reporting.
+  void markInteresting(BugReport *R, const AllocationPair &AP) const {
+    R->markInteresting(AP.first);
+    R->markInteresting(AP.second->Region);
+  }
+
+  /// The bug visitor which allows us to print extra diagnostics along the
+  /// BugReport path. For example, showing the allocation site of the leaked
+  /// region.
+  class SecKeychainBugVisitor
+    : public BugReporterVisitorImpl<SecKeychainBugVisitor> {
+  protected:
+    // The allocated region symbol tracked by the main analysis.
+    SymbolRef Sym;
+
+  public:
+    SecKeychainBugVisitor(SymbolRef S) : Sym(S) {}
+    virtual ~SecKeychainBugVisitor() {}
+
+    void Profile(llvm::FoldingSetNodeID &ID) const {
+      static int X = 0;
+      ID.AddPointer(&X);
+      ID.AddPointer(Sym);
+    }
+
+    PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+                                   const ExplodedNode *PrevN,
+                                   BugReporterContext &BRC,
+                                   BugReport &BR);
+  };
+};
+}
+
+/// ProgramState traits to store the currently allocated (and not yet freed)
+/// symbols. This is a map from the allocated content symbol to the
+/// corresponding AllocationState.
+REGISTER_MAP_WITH_PROGRAMSTATE(AllocatedData,
+                               SymbolRef,
+                               MacOSKeychainAPIChecker::AllocationState)
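+// Note (added for clarity): the macro above introduces the 'AllocatedData'
+// program-state trait used below via State->get<AllocatedData>(...), together
+// with the 'AllocatedDataTy' map type referenced in checkDeadSymbols().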
+
+static bool isEnclosingFunctionParam(const Expr *E) {
+  E = E->IgnoreParenCasts();
+  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+    const ValueDecl *VD = DRE->getDecl();
+    if (isa<ImplicitParamDecl>(VD) || isa<ParmVarDecl>(VD))
+      return true;
+  }
+  return false;
+}
+
+const MacOSKeychainAPIChecker::ADFunctionInfo
+  MacOSKeychainAPIChecker::FunctionsToTrack[FunctionsToTrackSize] = {
+    {"SecKeychainItemCopyContent", 4, 3, ValidAPI},                       // 0
+    {"SecKeychainFindGenericPassword", 6, 3, ValidAPI},                   // 1
+    {"SecKeychainFindInternetPassword", 13, 3, ValidAPI},                 // 2
+    {"SecKeychainItemFreeContent", 1, InvalidIdx, ValidAPI},              // 3
+    {"SecKeychainItemCopyAttributesAndData", 5, 5, ValidAPI},             // 4
+    {"SecKeychainItemFreeAttributesAndData", 1, InvalidIdx, ValidAPI},    // 5
+    {"free", 0, InvalidIdx, ErrorAPI},                                    // 6
+    {"CFStringCreateWithBytesNoCopy", 1, InvalidIdx, PossibleAPI},        // 7
+};
+
+unsigned MacOSKeychainAPIChecker::getTrackedFunctionIndex(StringRef Name,
+                                                          bool IsAllocator) {
+  for (unsigned I = 0; I < FunctionsToTrackSize; ++I) {
+    ADFunctionInfo FI = FunctionsToTrack[I];
+    if (FI.Name != Name)
+      continue;
+    // Make sure the function is of the right type (allocator vs deallocator).
+    if (IsAllocator && (FI.DeallocatorIdx == InvalidIdx))
+      return InvalidIdx;
+    if (!IsAllocator && (FI.DeallocatorIdx != InvalidIdx))
+      return InvalidIdx;
+
+    return I;
+  }
+  // The function is not tracked.
+  return InvalidIdx;
+}
+
+static bool isBadDeallocationArgument(const MemRegion *Arg) {
+  if (!Arg)
+    return false;
+  if (isa<AllocaRegion>(Arg) ||
+      isa<BlockDataRegion>(Arg) ||
+      isa<TypedRegion>(Arg)) {
+    return true;
+  }
+  return false;
+}
+
+/// Given the address expression, retrieve the value it's pointing to. Assume
+/// that value is itself an address, and return the corresponding symbol.
+static SymbolRef getAsPointeeSymbol(const Expr *Expr,
+                                    CheckerContext &C) {
+  ProgramStateRef State = C.getState();
+  SVal ArgV = State->getSVal(Expr, C.getLocationContext());
+
+  if (Optional<loc::MemRegionVal> X = ArgV.getAs<loc::MemRegionVal>()) {
+    StoreManager& SM = C.getStoreManager();
+    SymbolRef sym = SM.getBinding(State->getStore(), *X).getAsLocSymbol();
+    if (sym)
+      return sym;
+  }
+  return 0;
+}
+
+// When checking for error code, we need to consider the following cases:
+// 1) noErr / [0]
+// 2) someErr / [1, inf]
+// 3) unknown
+// If noError, returns true iff (1).
+// If !noError, returns true iff (2).
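+// Illustrative use (hypothetical example): for
+//   OSStatus st = SecKeychainItemCopyContent(...);
+// the default call answers whether the state already constrains 'st' to be
+// non-zero (case 2 above); passing noError=true instead asks whether 'st' is
+// already known to be noErr (case 1 above).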
+bool MacOSKeychainAPIChecker::definitelyReturnedError(SymbolRef RetSym,
+                                                      ProgramStateRef State,
+                                                      SValBuilder &Builder,
+                                                      bool noError) const {
+  DefinedOrUnknownSVal NoErrVal = Builder.makeIntVal(NoErr,
+    Builder.getSymbolManager().getType(RetSym));
+  DefinedOrUnknownSVal NoErr = Builder.evalEQ(State, NoErrVal,
+                                                     nonloc::SymbolVal(RetSym));
+  ProgramStateRef ErrState = State->assume(NoErr, noError);
+  if (ErrState == State) {
+    return true;
+  }
+
+  return false;
+}
+
+// Report deallocator mismatch. Remove the region from tracking - reporting a
+// missing free error after this one is redundant.
+void MacOSKeychainAPIChecker::
+  generateDeallocatorMismatchReport(const AllocationPair &AP,
+                                    const Expr *ArgExpr,
+                                    CheckerContext &C) const {
+  ProgramStateRef State = C.getState();
+  State = State->remove<AllocatedData>(AP.first);
+  ExplodedNode *N = C.addTransition(State);
+
+  if (!N)
+    return;
+  initBugType();
+  SmallString<80> sbuf;
+  llvm::raw_svector_ostream os(sbuf);
+  unsigned int PDeallocIdx =
+               FunctionsToTrack[AP.second->AllocatorIdx].DeallocatorIdx;
+
+  os << "Deallocator doesn't match the allocator: '"
+     << FunctionsToTrack[PDeallocIdx].Name << "' should be used.";
+  BugReport *Report = new BugReport(*BT, os.str(), N);
+  Report->addVisitor(new SecKeychainBugVisitor(AP.first));
+  Report->addRange(ArgExpr->getSourceRange());
+  markInteresting(Report, AP);
+  C.emitReport(Report);
+}
+
+void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
+                                           CheckerContext &C) const {
+  unsigned idx = InvalidIdx;
+  ProgramStateRef State = C.getState();
+
+  const FunctionDecl *FD = C.getCalleeDecl(CE);
+  if (!FD || FD->getKind() != Decl::Function)
+    return;
+
+  StringRef funName = C.getCalleeName(FD);
+  if (funName.empty())
+    return;
+
+  // If it is a call to an allocator function, it could be a double allocation.
+  idx = getTrackedFunctionIndex(funName, true);
+  if (idx != InvalidIdx) {
+    const Expr *ArgExpr = CE->getArg(FunctionsToTrack[idx].Param);
+    if (SymbolRef V = getAsPointeeSymbol(ArgExpr, C))
+      if (const AllocationState *AS = State->get<AllocatedData>(V)) {
+        if (!definitelyReturnedError(AS->Region, State, C.getSValBuilder())) {
+          // Remove the value from the state. The new symbol will be added for
+          // tracking when the second allocator is processed in checkPostStmt().
+          State = State->remove<AllocatedData>(V);
+          ExplodedNode *N = C.addTransition(State);
+          if (!N)
+            return;
+          initBugType();
+          SmallString<128> sbuf;
+          llvm::raw_svector_ostream os(sbuf);
+          unsigned int DIdx = FunctionsToTrack[AS->AllocatorIdx].DeallocatorIdx;
+          os << "Allocated data should be released before another call to "
+              << "the allocator: missing a call to '"
+              << FunctionsToTrack[DIdx].Name
+              << "'.";
+          BugReport *Report = new BugReport(*BT, os.str(), N);
+          Report->addVisitor(new SecKeychainBugVisitor(V));
+          Report->addRange(ArgExpr->getSourceRange());
+          Report->markInteresting(AS->Region);
+          C.emitReport(Report);
+        }
+      }
+    return;
+  }
+
+  // Is it a call to one of deallocator functions?
+  idx = getTrackedFunctionIndex(funName, false);
+  if (idx == InvalidIdx)
+    return;
+
+  // Check the argument to the deallocator.
+  const Expr *ArgExpr = CE->getArg(FunctionsToTrack[idx].Param);
+  SVal ArgSVal = State->getSVal(ArgExpr, C.getLocationContext());
+
+  // Undef is reported by another checker.
+  if (ArgSVal.isUndef())
+    return;
+
+  SymbolRef ArgSM = ArgSVal.getAsLocSymbol();
+
+  // If the argument is coming from the heap, globals, or unknown, do not
+  // report it.
+  bool RegionArgIsBad = false;
+  if (!ArgSM) {
+    if (!isBadDeallocationArgument(ArgSVal.getAsRegion()))
+      return;
+    RegionArgIsBad = true;
+  }
+
+  // Is the argument to the call being tracked?
+  const AllocationState *AS = State->get<AllocatedData>(ArgSM);
+  if (!AS && FunctionsToTrack[idx].Kind != ValidAPI) {
+    return;
+  }
+  // If trying to free data which has not been allocated yet, report as a bug.
+  // TODO: We might want a more precise diagnostic for double free
+  // (that would involve tracking all the freed symbols in the checker state).
+  if (!AS || RegionArgIsBad) {
+    // It is possible that this is a false positive - the argument might
+    // have entered as an enclosing function parameter.
+    if (isEnclosingFunctionParam(ArgExpr))
+      return;
+
+    ExplodedNode *N = C.addTransition(State);
+    if (!N)
+      return;
+    initBugType();
+    BugReport *Report = new BugReport(*BT,
+        "Trying to free data which has not been allocated.", N);
+    Report->addRange(ArgExpr->getSourceRange());
+    if (AS)
+      Report->markInteresting(AS->Region);
+    C.emitReport(Report);
+    return;
+  }
+
+  // Process functions which might deallocate.
+  if (FunctionsToTrack[idx].Kind == PossibleAPI) {
+
+    if (funName == "CFStringCreateWithBytesNoCopy") {
+      const Expr *DeallocatorExpr = CE->getArg(5)->IgnoreParenCasts();
+      // NULL ~ default deallocator, so warn.
+      if (DeallocatorExpr->isNullPointerConstant(C.getASTContext(),
+          Expr::NPC_ValueDependentIsNotNull)) {
+        const AllocationPair AP = std::make_pair(ArgSM, AS);
+        generateDeallocatorMismatchReport(AP, ArgExpr, C);
+        return;
+      }
+      // One of the default allocators, so warn.
+      if (const DeclRefExpr *DE = dyn_cast<DeclRefExpr>(DeallocatorExpr)) {
+        StringRef DeallocatorName = DE->getFoundDecl()->getName();
+        if (DeallocatorName == "kCFAllocatorDefault" ||
+            DeallocatorName == "kCFAllocatorSystemDefault" ||
+            DeallocatorName == "kCFAllocatorMalloc") {
+          const AllocationPair AP = std::make_pair(ArgSM, AS);
+          generateDeallocatorMismatchReport(AP, ArgExpr, C);
+          return;
+        }
+        // If kCFAllocatorNull, which does not deallocate, we still have to
+        // find the deallocator.
+        if (DE->getFoundDecl()->getName() == "kCFAllocatorNull")
+          return;
+      }
+      // In all other cases, assume the user supplied a correct deallocator
+      // that will free memory so stop tracking.
+      State = State->remove<AllocatedData>(ArgSM);
+      C.addTransition(State);
+      return;
+    }
+
+    llvm_unreachable("We know of no other possible APIs.");
+  }
+
+  // The call is deallocating a value we previously allocated, so remove it
+  // from the next state.
+  State = State->remove<AllocatedData>(ArgSM);
+
+  // Check if the proper deallocator is used.
+  unsigned int PDeallocIdx = FunctionsToTrack[AS->AllocatorIdx].DeallocatorIdx;
+  if (PDeallocIdx != idx || (FunctionsToTrack[idx].Kind == ErrorAPI)) {
+    const AllocationPair AP = std::make_pair(ArgSM, AS);
+    generateDeallocatorMismatchReport(AP, ArgExpr, C);
+    return;
+  }
+
+  // If the buffer can be null and the return status can be an error,
+  // report a bad call to free.
+  if (State->assume(ArgSVal.castAs<DefinedSVal>(), false) &&
+      !definitelyDidnotReturnError(AS->Region, State, C.getSValBuilder())) {
+    ExplodedNode *N = C.addTransition(State);
+    if (!N)
+      return;
+    initBugType();
+    BugReport *Report = new BugReport(*BT,
+        "Only call free if a valid (non-NULL) buffer was returned.", N);
+    Report->addVisitor(new SecKeychainBugVisitor(ArgSM));
+    Report->addRange(ArgExpr->getSourceRange());
+    Report->markInteresting(AS->Region);
+    C.emitReport(Report);
+    return;
+  }
+
+  C.addTransition(State);
+}
+
+void MacOSKeychainAPIChecker::checkPostStmt(const CallExpr *CE,
+                                            CheckerContext &C) const {
+  ProgramStateRef State = C.getState();
+  const FunctionDecl *FD = C.getCalleeDecl(CE);
+  if (!FD || FD->getKind() != Decl::Function)
+    return;
+
+  StringRef funName = C.getCalleeName(FD);
+
+  // If a value has been allocated, add it to the set for tracking.
+  unsigned idx = getTrackedFunctionIndex(funName, true);
+  if (idx == InvalidIdx)
+    return;
+
+  const Expr *ArgExpr = CE->getArg(FunctionsToTrack[idx].Param);
+  // If the argument entered as an enclosing function parameter, skip it to
+  // avoid false positives.
+  if (isEnclosingFunctionParam(ArgExpr) &&
+      C.getLocationContext()->getParent() == 0)
+    return;
+
+  if (SymbolRef V = getAsPointeeSymbol(ArgExpr, C)) {
+    // If the argument points to something that's not a symbolic region, it
+    // can be:
+    //  - unknown (cannot reason about it)
+    //  - undefined (already reported by other checker)
+    //  - constant (null - should not be tracked,
+    //              other constants will generate a compiler warning)
+    //  - goto (should be reported by other checker)
+
+    // The call return value symbol should stay alive for as long as the
+    // allocated value symbol, since our diagnostics depend on the value
+    // returned by the call. For example, data should only be freed if noErr
+    // was returned during allocation.
+    SymbolRef RetStatusSymbol =
+      State->getSVal(CE, C.getLocationContext()).getAsSymbol();
+    C.getSymbolManager().addSymbolDependency(V, RetStatusSymbol);
+
+    // Track the allocated value in the checker state.
+    State = State->set<AllocatedData>(V, AllocationState(ArgExpr, idx,
+                                                         RetStatusSymbol));
+    assert(State);
+    C.addTransition(State);
+  }
+}
+
+// TODO: This logic is the same as in Malloc checker.
+const ExplodedNode *
+MacOSKeychainAPIChecker::getAllocationNode(const ExplodedNode *N,
+                                           SymbolRef Sym,
+                                           CheckerContext &C) const {
+  const LocationContext *LeakContext = N->getLocationContext();
+  // Walk the ExplodedGraph backwards and find the first node that referred to
+  // the tracked symbol.
+  const ExplodedNode *AllocNode = N;
+
+  while (N) {
+    if (!N->getState()->get<AllocatedData>(Sym))
+      break;
+    // The allocation node is the last node in the current context in which
+    // the symbol was tracked.
+    if (N->getLocationContext() == LeakContext)
+      AllocNode = N;
+    N = N->pred_empty() ? NULL : *(N->pred_begin());
+  }
+
+  return AllocNode;
+}
+
+BugReport *MacOSKeychainAPIChecker::
+  generateAllocatedDataNotReleasedReport(const AllocationPair &AP,
+                                         ExplodedNode *N,
+                                         CheckerContext &C) const {
+  const ADFunctionInfo &FI = FunctionsToTrack[AP.second->AllocatorIdx];
+  initBugType();
+  SmallString<70> sbuf;
+  llvm::raw_svector_ostream os(sbuf);
+  os << "Allocated data is not released: missing a call to '"
+      << FunctionsToTrack[FI.DeallocatorIdx].Name << "'.";
+
+  // Most bug reports are cached at the location where they occurred.
+  // With leaks, we want to unique them by the location where they were
+  // allocated, and only report a single path.
+  PathDiagnosticLocation LocUsedForUniqueing;
+  const ExplodedNode *AllocNode = getAllocationNode(N, AP.first, C);
+  const Stmt *AllocStmt = 0;
+  ProgramPoint P = AllocNode->getLocation();
+  if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>())
+    AllocStmt = Exit->getCalleeContext()->getCallSite();
+  else if (Optional<clang::PostStmt> PS = P.getAs<clang::PostStmt>())
+    AllocStmt = PS->getStmt();
+
+  if (AllocStmt)
+    LocUsedForUniqueing = PathDiagnosticLocation::createBegin(AllocStmt,
+                                              C.getSourceManager(),
+                                              AllocNode->getLocationContext());
+
+  BugReport *Report = new BugReport(*BT, os.str(), N, LocUsedForUniqueing,
+                                   AllocNode->getLocationContext()->getDecl());
+
+  Report->addVisitor(new SecKeychainBugVisitor(AP.first));
+  markInteresting(Report, AP);
+  return Report;
+}
+
+void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR,
+                                               CheckerContext &C) const {
+  ProgramStateRef State = C.getState();
+  AllocatedDataTy ASet = State->get<AllocatedData>();
+  if (ASet.isEmpty())
+    return;
+
+  bool Changed = false;
+  AllocationPairVec Errors;
+  for (AllocatedDataTy::iterator I = ASet.begin(), E = ASet.end(); I != E; ++I) {
+    if (SR.isLive(I->first))
+      continue;
+
+    Changed = true;
+    State = State->remove<AllocatedData>(I->first);
+    // If the allocated symbol is null or if the allocation call might have
+    // returned an error, do not report.
+    ConstraintManager &CMgr = State->getConstraintManager();
+    ConditionTruthVal AllocFailed = CMgr.isNull(State, I.getKey());
+    if (AllocFailed.isConstrainedTrue() ||
+        definitelyReturnedError(I->second.Region, State, C.getSValBuilder()))
+      continue;
+    Errors.push_back(std::make_pair(I->first, &I->second));
+  }
+  if (!Changed) {
+    // Generate the new, cleaned up state.
+    C.addTransition(State);
+    return;
+  }
+
+  static SimpleProgramPointTag Tag("MacOSKeychainAPIChecker : DeadSymbolsLeak");
+  ExplodedNode *N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
+
+  // Generate the error reports.
+  for (AllocationPairVec::iterator I = Errors.begin(), E = Errors.end();
+                                                       I != E; ++I) {
+    C.emitReport(generateAllocatedDataNotReleasedReport(*I, N, C));
+  }
+
+  // Generate the new, cleaned up state.
+  C.addTransition(State, N);
+}
+
+
+PathDiagnosticPiece *MacOSKeychainAPIChecker::SecKeychainBugVisitor::VisitNode(
+                                                      const ExplodedNode *N,
+                                                      const ExplodedNode *PrevN,
+                                                      BugReporterContext &BRC,
+                                                      BugReport &BR) {
+  const AllocationState *AS = N->getState()->get<AllocatedData>(Sym);
+  if (!AS)
+    return 0;
+  const AllocationState *ASPrev = PrevN->getState()->get<AllocatedData>(Sym);
+  if (ASPrev)
+    return 0;
+
+  // (!ASPrev && AS) ~ We started tracking symbol in node N, it must be the
+  // allocation site.
+  const CallExpr *CE =
+      cast<CallExpr>(N->getLocation().castAs<StmtPoint>().getStmt());
+  const FunctionDecl *funDecl = CE->getDirectCallee();
+  assert(funDecl && "We do not support indirect function calls as of now.");
+  StringRef funName = funDecl->getName();
+
+  // Get the expression of the corresponding argument.
+  unsigned Idx = getTrackedFunctionIndex(funName, true);
+  assert(Idx != InvalidIdx && "This should be a call to an allocator.");
+  const Expr *ArgExpr = CE->getArg(FunctionsToTrack[Idx].Param);
+  PathDiagnosticLocation Pos(ArgExpr, BRC.getSourceManager(),
+                             N->getLocationContext());
+  return new PathDiagnosticEventPiece(Pos, "Data is allocated here.");
+}
+
+void ento::registerMacOSKeychainAPIChecker(CheckerManager &mgr) {
+  mgr.registerChecker<MacOSKeychainAPIChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
new file mode 100644
index 0000000..32ebb51
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -0,0 +1,128 @@
+// MacOSXAPIChecker.cpp - Checks proper use of various MacOS X APIs -*- C++ -*-//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines MacOSXAPIChecker, which is an assortment of checks on calls
+// to various, widely used Apple APIs.
+//
+// FIXME: What's currently in BasicObjCFoundationChecks.cpp should be migrated
+// to here, using the new Checker interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class MacOSXAPIChecker : public Checker< check::PreStmt<CallExpr> > {
+  mutable OwningPtr<BugType> BT_dispatchOnce;
+
+public:
+  void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+  void CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
+                         StringRef FName) const;
+
+  typedef void (MacOSXAPIChecker::*SubChecker)(CheckerContext &,
+                                               const CallExpr *,
+                                               StringRef FName) const;
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// dispatch_once and dispatch_once_f
+//===----------------------------------------------------------------------===//
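+//
+// Illustrative misuse (hypothetical code) that this check is meant to catch:
+//   void run() {
+//     dispatch_once_t pred = 0;         // predicate lives on the stack
+//     dispatch_once(&pred, ^{ /* ... */ });
+//   }
+// Declaring the predicate 'static' (or at file scope) avoids the warning.
+//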
+
+void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
+                                         StringRef FName) const {
+  if (CE->getNumArgs() < 1)
+    return;
+
+  // Check if the first argument is stack allocated.  If so, issue a warning
+  // because that's likely to be bad news.
+  ProgramStateRef state = C.getState();
+  const MemRegion *R =
+    state->getSVal(CE->getArg(0), C.getLocationContext()).getAsRegion();
+  if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
+    return;
+
+  ExplodedNode *N = C.generateSink(state);
+  if (!N)
+    return;
+
+  if (!BT_dispatchOnce)
+    BT_dispatchOnce.reset(new BugType("Improper use of 'dispatch_once'",
+                                      "API Misuse (Apple)"));
+
+  // Handle _dispatch_once.  In some versions of the OS X SDK, dispatch_once is
+  // a macro that wraps a call to _dispatch_once, which in turn calls the real
+  // dispatch_once.  Users do not care; they just want the warning at the
+  // top-level call.
+  if (CE->getLocStart().isMacroID()) {
+    StringRef TrimmedFName = FName.ltrim("_");
+    if (TrimmedFName != FName)
+      FName = TrimmedFName;
+  }
+  
+  SmallString<256> S;
+  llvm::raw_svector_ostream os(S);
+  os << "Call to '" << FName << "' uses";
+  if (const VarRegion *VR = dyn_cast<VarRegion>(R))
+    os << " the local variable '" << VR->getDecl()->getName() << '\'';
+  else
+    os << " stack allocated memory";
+  os << " for the predicate value.  Using such transient memory for "
+        "the predicate is potentially dangerous.";
+  if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
+    os << "  Perhaps you intended to declare the variable as 'static'?";
+
+  BugReport *report = new BugReport(*BT_dispatchOnce, os.str(), N);
+  report->addRange(CE->getArg(0)->getSourceRange());
+  C.emitReport(report);
+}
+
+//===----------------------------------------------------------------------===//
+// Central dispatch function.
+//===----------------------------------------------------------------------===//
+
+void MacOSXAPIChecker::checkPreStmt(const CallExpr *CE,
+                                    CheckerContext &C) const {
+  StringRef Name = C.getCalleeName(CE);
+  if (Name.empty())
+    return;
+
+  SubChecker SC =
+    llvm::StringSwitch<SubChecker>(Name)
+      .Cases("dispatch_once",
+             "_dispatch_once",
+             "dispatch_once_f",
+             &MacOSXAPIChecker::CheckDispatchOnce)
+      .Default(NULL);
+
+  if (SC)
+    (this->*SC)(C, CE, Name);
+}
+
+//===----------------------------------------------------------------------===//
+// Registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerMacOSXAPIChecker(CheckerManager &mgr) {
+  mgr.registerChecker<MacOSXAPIChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/Makefile b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/Makefile
new file mode 100644
index 0000000..2582908
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/Makefile
@@ -0,0 +1,24 @@
+##===- clang/lib/StaticAnalyzer/Checkers/Makefile ----------*- Makefile -*-===##
+# 
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+# 
+##===----------------------------------------------------------------------===##
+#
+# This builds the library of Clang static analyzer checkers.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../../..
+LIBRARYNAME := clangStaticAnalyzerCheckers
+
+BUILT_SOURCES = Checkers.inc
+TABLEGEN_INC_FILES_COMMON = 1
+
+include $(CLANG_LEVEL)/Makefile
+
+$(ObjDir)/Checkers.inc.tmp : Checkers.td $(PROJ_SRC_DIR)/$(CLANG_LEVEL)/include/clang/StaticAnalyzer/Checkers/CheckerBase.td $(CLANG_TBLGEN) $(ObjDir)/.dir
+	$(Echo) "Building Clang SA Checkers tables with tblgen"
+	$(Verb) $(ClangTableGen) -gen-clang-sa-checkers -I $(PROJ_SRC_DIR)/$(CLANG_LEVEL)/include -o $(call SYSPATH, $@) $<
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
new file mode 100644
index 0000000..5d3eb65
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -0,0 +1,2172 @@
+//=== MallocChecker.cpp - A malloc/free checker -------------------*- C++ -*--//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines malloc/free checker, which checks for potential memory
+// leaks, double free, and use-after-free problems.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "InterCheckerAPI.h"
+#include "clang/AST/Attr.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include <climits>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+// Used to check correspondence between allocators and deallocators.
+enum AllocationFamily {
+  AF_None,
+  AF_Malloc,
+  AF_CXXNew,
+  AF_CXXNewArray
+};
+
+class RefState {
+  enum Kind { // Reference to allocated memory.
+              Allocated,
+              // Reference to released/freed memory.
+              Released,
+              // The responsibility for freeing resources has transferred from
+              // this reference. A relinquished symbol should not be freed.
+              Relinquished,
+              // We are no longer guaranteed to have observed all manipulations
+              // of this pointer/memory. For example, it could have been
+              // passed as a parameter to an opaque function.
+              Escaped
+  };
+
+  const Stmt *S;
+  unsigned K : 2; // Kind enum, but stored as a bitfield.
+  unsigned Family : 30; // Rest of 32-bit word, currently just an allocation 
+                        // family.
+
+  RefState(Kind k, const Stmt *s, unsigned family) 
+    : S(s), K(k), Family(family) {
+    assert(family != AF_None);
+  }
+public:
+  bool isAllocated() const { return K == Allocated; }
+  bool isReleased() const { return K == Released; }
+  bool isRelinquished() const { return K == Relinquished; }
+  bool isEscaped() const { return K == Escaped; }
+  AllocationFamily getAllocationFamily() const {
+    return (AllocationFamily)Family;
+  }
+  const Stmt *getStmt() const { return S; }
+
+  bool operator==(const RefState &X) const {
+    return K == X.K && S == X.S && Family == X.Family;
+  }
+
+  static RefState getAllocated(unsigned family, const Stmt *s) {
+    return RefState(Allocated, s, family);
+  }
+  static RefState getReleased(unsigned family, const Stmt *s) { 
+    return RefState(Released, s, family);
+  }
+  static RefState getRelinquished(unsigned family, const Stmt *s) {
+    return RefState(Relinquished, s, family);
+  }
+  static RefState getEscaped(const RefState *RS) {
+    return RefState(Escaped, RS->getStmt(), RS->getAllocationFamily());
+  }
+
+  void Profile(llvm::FoldingSetNodeID &ID) const {
+    ID.AddInteger(K);
+    ID.AddPointer(S);
+    ID.AddInteger(Family);
+  }
+
+  void dump(raw_ostream &OS) const {
+    static const char *Table[] = {
+      "Allocated",
+      "Released",
+      "Relinquished",
+      "Escaped"
+    };
+    OS << Table[(unsigned) K];
+  }
+
+  LLVM_ATTRIBUTE_USED void dump() const {
+    dump(llvm::errs());
+  }
+};
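+// Example (hypothetical, for illustration): a tracked symbol typically moves
+// through the RefState kinds above as follows:
+//
+//   int *p = (int *)malloc(4);   // Allocated
+//   takes_ownership(p);          // Relinquished or Escaped, depending on what
+//                                // we know about the callee
+//   free(p);                     // Released; a second free(p) is a double free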
+
+enum ReallocPairKind {
+  RPToBeFreedAfterFailure,
+  // The symbol has been freed when reallocation failed.
+  RPIsFreeOnFailure,
+  // The symbol does not need to be freed after reallocation fails.
+  RPDoNotTrackAfterFailure
+};
+
+/// \class ReallocPair
+/// \brief Stores information about the symbol being reallocated by a call to
+/// 'realloc' to allow modeling failed reallocation later in the path.
+struct ReallocPair {
+  // \brief The symbol which realloc reallocated.
+  SymbolRef ReallocatedSym;
+  ReallocPairKind Kind;
+
+  ReallocPair(SymbolRef S, ReallocPairKind K) :
+    ReallocatedSym(S), Kind(K) {}
+  void Profile(llvm::FoldingSetNodeID &ID) const {
+    ID.AddInteger(Kind);
+    ID.AddPointer(ReallocatedSym);
+  }
+  bool operator==(const ReallocPair &X) const {
+    return ReallocatedSym == X.ReallocatedSym &&
+           Kind == X.Kind;
+  }
+};
+
+typedef std::pair<const ExplodedNode*, const MemRegion*> LeakInfo;
+
+class MallocChecker : public Checker<check::DeadSymbols,
+                                     check::PointerEscape,
+                                     check::ConstPointerEscape,
+                                     check::PreStmt<ReturnStmt>,
+                                     check::PreCall,
+                                     check::PostStmt<CallExpr>,
+                                     check::PostStmt<CXXNewExpr>,
+                                     check::PreStmt<CXXDeleteExpr>,
+                                     check::PostStmt<BlockExpr>,
+                                     check::PostObjCMessage,
+                                     check::Location,
+                                     eval::Assume>
+{
+  mutable OwningPtr<BugType> BT_DoubleFree;
+  mutable OwningPtr<BugType> BT_Leak;
+  mutable OwningPtr<BugType> BT_UseFree;
+  mutable OwningPtr<BugType> BT_BadFree;
+  mutable OwningPtr<BugType> BT_MismatchedDealloc;
+  mutable OwningPtr<BugType> BT_OffsetFree;
+  mutable IdentifierInfo *II_malloc, *II_free, *II_realloc, *II_calloc,
+                         *II_valloc, *II_reallocf, *II_strndup, *II_strdup;
+
+public:
+  MallocChecker() : II_malloc(0), II_free(0), II_realloc(0), II_calloc(0),
+                    II_valloc(0), II_reallocf(0), II_strndup(0), II_strdup(0) {}
+
+  /// In pessimistic mode, the checker assumes that it does not know which
+  /// functions might free the memory.
+  struct ChecksFilter {
+    DefaultBool CMallocPessimistic;
+    DefaultBool CMallocOptimistic;
+    DefaultBool CNewDeleteChecker;
+    DefaultBool CNewDeleteLeaksChecker;
+    DefaultBool CMismatchedDeallocatorChecker;
+  };
+
+  ChecksFilter Filter;
+
+  void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+  void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+  void checkPostStmt(const CXXNewExpr *NE, CheckerContext &C) const;
+  void checkPreStmt(const CXXDeleteExpr *DE, CheckerContext &C) const;
+  void checkPostObjCMessage(const ObjCMethodCall &Call, CheckerContext &C) const;
+  void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
+  void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+  void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
+  ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
+                            bool Assumption) const;
+  void checkLocation(SVal l, bool isLoad, const Stmt *S,
+                     CheckerContext &C) const;
+
+  ProgramStateRef checkPointerEscape(ProgramStateRef State,
+                                    const InvalidatedSymbols &Escaped,
+                                    const CallEvent *Call,
+                                    PointerEscapeKind Kind) const;
+  ProgramStateRef checkConstPointerEscape(ProgramStateRef State,
+                                          const InvalidatedSymbols &Escaped,
+                                          const CallEvent *Call,
+                                          PointerEscapeKind Kind) const;
+
+  void printState(raw_ostream &Out, ProgramStateRef State,
+                  const char *NL, const char *Sep) const;
+
+private:
+  void initIdentifierInfo(ASTContext &C) const;
+
+  /// \brief Determine family of a deallocation expression.
+  AllocationFamily getAllocationFamily(CheckerContext &C, const Stmt *S) const;
+
+  /// \brief Print names of allocators and deallocators.
+  ///
+  /// \returns true on success.
+  bool printAllocDeallocName(raw_ostream &os, CheckerContext &C, 
+                             const Expr *E) const;
+
+  /// \brief Print expected name of an allocator based on the deallocator's
+  /// family derived from the DeallocExpr.
+  void printExpectedAllocName(raw_ostream &os, CheckerContext &C, 
+                              const Expr *DeallocExpr) const;
+  /// \brief Print expected name of a deallocator based on the allocator's 
+  /// family.
+  void printExpectedDeallocName(raw_ostream &os, AllocationFamily Family) const;
+
+  ///@{
+  /// Check if this is one of the functions which can allocate/reallocate memory 
+  /// pointed to by one of its arguments.
+  bool isMemFunction(const FunctionDecl *FD, ASTContext &C) const;
+  bool isFreeFunction(const FunctionDecl *FD, ASTContext &C) const;
+  bool isAllocationFunction(const FunctionDecl *FD, ASTContext &C) const;
+  bool isStandardNewDelete(const FunctionDecl *FD, ASTContext &C) const;
+  ///@}
+  static ProgramStateRef MallocMemReturnsAttr(CheckerContext &C,
+                                              const CallExpr *CE,
+                                              const OwnershipAttr* Att);
+  static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
+                                     const Expr *SizeEx, SVal Init,
+                                     ProgramStateRef State,
+                                     AllocationFamily Family = AF_Malloc) {
+    return MallocMemAux(C, CE,
+                        State->getSVal(SizeEx, C.getLocationContext()),
+                        Init, State, Family);
+  }
+
+  static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
+                                     SVal SizeEx, SVal Init,
+                                     ProgramStateRef State,
+                                     AllocationFamily Family = AF_Malloc);
+
+  /// Update the RefState to reflect the new memory allocation.
+  static ProgramStateRef 
+  MallocUpdateRefState(CheckerContext &C, const Expr *E, ProgramStateRef State,
+                       AllocationFamily Family = AF_Malloc);
+
+  ProgramStateRef FreeMemAttr(CheckerContext &C, const CallExpr *CE,
+                              const OwnershipAttr* Att) const;
+  ProgramStateRef FreeMemAux(CheckerContext &C, const CallExpr *CE,
+                             ProgramStateRef state, unsigned Num,
+                             bool Hold,
+                             bool &ReleasedAllocated,
+                             bool ReturnsNullOnFailure = false) const;
+  ProgramStateRef FreeMemAux(CheckerContext &C, const Expr *Arg,
+                             const Expr *ParentExpr,
+                             ProgramStateRef State,
+                             bool Hold,
+                             bool &ReleasedAllocated,
+                             bool ReturnsNullOnFailure = false) const;
+
+  ProgramStateRef ReallocMem(CheckerContext &C, const CallExpr *CE,
+                             bool FreesMemOnFailure) const;
+  static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE);
+  
+  ///\brief Check if the memory associated with this symbol was released.
+  bool isReleased(SymbolRef Sym, CheckerContext &C) const;
+
+  bool checkUseAfterFree(SymbolRef Sym, CheckerContext &C, const Stmt *S) const;
+
+  /// Check if the function is known not to free memory, or if it is
+  /// "interesting" and should be modeled explicitly.
+  ///
+  /// We assume that pointers do not escape through calls to system functions
+  /// not handled by this checker.
+  bool doesNotFreeMemOrInteresting(const CallEvent *Call,
+                                   ProgramStateRef State) const;
+
+  // Implementation of the checkPointerEscape callbacks.
+  ProgramStateRef checkPointerEscapeAux(ProgramStateRef State,
+                                  const InvalidatedSymbols &Escaped,
+                                  const CallEvent *Call,
+                                  PointerEscapeKind Kind,
+                                  bool(*CheckRefState)(const RefState*)) const;
+
+  ///@{
+  /// Tells if a given family/call/symbol is tracked by the current checker.
+  bool isTrackedByCurrentChecker(AllocationFamily Family) const;
+  bool isTrackedByCurrentChecker(CheckerContext &C,
+                                 const Stmt *AllocDeallocStmt) const;
+  bool isTrackedByCurrentChecker(CheckerContext &C, SymbolRef Sym) const;
+  ///@}
+  static bool SummarizeValue(raw_ostream &os, SVal V);
+  static bool SummarizeRegion(raw_ostream &os, const MemRegion *MR);
+  void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange Range, 
+                     const Expr *DeallocExpr) const;
+  void ReportMismatchedDealloc(CheckerContext &C, SourceRange Range,
+                               const Expr *DeallocExpr, const RefState *RS,
+                               SymbolRef Sym) const;
+  void ReportOffsetFree(CheckerContext &C, SVal ArgVal, SourceRange Range, 
+                        const Expr *DeallocExpr, 
+                        const Expr *AllocExpr = 0) const;
+  void ReportUseAfterFree(CheckerContext &C, SourceRange Range,
+                          SymbolRef Sym) const;
+  void ReportDoubleFree(CheckerContext &C, SourceRange Range, bool Released,
+                        SymbolRef Sym, SymbolRef PrevSym) const;
+
+  /// Find the location of the allocation for Sym on the path leading to the
+  /// exploded node N.
+  LeakInfo getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
+                             CheckerContext &C) const;
+
+  void reportLeak(SymbolRef Sym, ExplodedNode *N, CheckerContext &C) const;
+
+  /// The bug visitor which allows us to print extra diagnostics along the
+  /// BugReport path. For example, showing the allocation site of the leaked
+  /// region.
+  class MallocBugVisitor : public BugReporterVisitorImpl<MallocBugVisitor> {
+  protected:
+    enum NotificationMode {
+      Normal,
+      ReallocationFailed
+    };
+
+    // The allocated region symbol tracked by the main analysis.
+    SymbolRef Sym;
+
+    // The mode we are in, i.e. what kind of diagnostics will be emitted.
+    NotificationMode Mode;
+
+    // A symbol from when the primary region should have been reallocated.
+    SymbolRef FailedReallocSymbol;
+
+    bool IsLeak;
+
+  public:
+    MallocBugVisitor(SymbolRef S, bool isLeak = false)
+       : Sym(S), Mode(Normal), FailedReallocSymbol(0), IsLeak(isLeak) {}
+
+    virtual ~MallocBugVisitor() {}
+
+    void Profile(llvm::FoldingSetNodeID &ID) const {
+      static int X = 0;
+      ID.AddPointer(&X);
+      ID.AddPointer(Sym);
+    }
+
+    inline bool isAllocated(const RefState *S, const RefState *SPrev,
+                            const Stmt *Stmt) {
+      // Did not track -> allocated. Other state (released) -> allocated.
+      return (Stmt && (isa<CallExpr>(Stmt) || isa<CXXNewExpr>(Stmt)) &&
+              (S && S->isAllocated()) && (!SPrev || !SPrev->isAllocated()));
+    }
+
+    inline bool isReleased(const RefState *S, const RefState *SPrev,
+                           const Stmt *Stmt) {
+      // Did not track -> released. Other state (allocated) -> released.
+      return (Stmt && (isa<CallExpr>(Stmt) || isa<CXXDeleteExpr>(Stmt)) &&
+              (S && S->isReleased()) && (!SPrev || !SPrev->isReleased()));
+    }
+
+    inline bool isRelinquished(const RefState *S, const RefState *SPrev,
+                               const Stmt *Stmt) {
+      // Did not track -> relinquished. Other state (allocated) -> relinquished.
+      return (Stmt && (isa<CallExpr>(Stmt) || isa<ObjCMessageExpr>(Stmt) ||
+                                              isa<ObjCPropertyRefExpr>(Stmt)) &&
+              (S && S->isRelinquished()) &&
+              (!SPrev || !SPrev->isRelinquished()));
+    }
+
+    inline bool isReallocFailedCheck(const RefState *S, const RefState *SPrev,
+                                     const Stmt *Stmt) {
+      // If the expression is not a call, and the state change is
+      // released -> allocated, it must be the realloc return value
+      // check. If we have to handle more cases here, it might be cleaner just
+      // to track this extra bit in the state itself.
+      return ((!Stmt || !isa<CallExpr>(Stmt)) &&
+              (S && S->isAllocated()) && (SPrev && !SPrev->isAllocated()));
+    }
+
+    PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+                                   const ExplodedNode *PrevN,
+                                   BugReporterContext &BRC,
+                                   BugReport &BR);
+
+    PathDiagnosticPiece* getEndPath(BugReporterContext &BRC,
+                                    const ExplodedNode *EndPathNode,
+                                    BugReport &BR) {
+      if (!IsLeak)
+        return 0;
+
+      PathDiagnosticLocation L =
+        PathDiagnosticLocation::createEndOfPath(EndPathNode,
+                                                BRC.getSourceManager());
+      // Do not add the statement itself as a range in case of leak.
+      return new PathDiagnosticEventPiece(L, BR.getDescription(), false);
+    }
+
+  private:
+    class StackHintGeneratorForReallocationFailed
+        : public StackHintGeneratorForSymbol {
+    public:
+      StackHintGeneratorForReallocationFailed(SymbolRef S, StringRef M)
+        : StackHintGeneratorForSymbol(S, M) {}
+
+      virtual std::string getMessageForArg(const Expr *ArgE, unsigned ArgIndex) {
+        // Printed parameters start at 1, not 0.
+        ++ArgIndex;
+
+        SmallString<200> buf;
+        llvm::raw_svector_ostream os(buf);
+
+        os << "Reallocation of " << ArgIndex << llvm::getOrdinalSuffix(ArgIndex)
+           << " parameter failed";
+
+        return os.str();
+      }
+
+      virtual std::string getMessageForReturn(const CallExpr *CallExpr) {
+        return "Reallocation of returned value failed";
+      }
+    };
+  };
+};
+} // end anonymous namespace
+
+REGISTER_MAP_WITH_PROGRAMSTATE(RegionState, SymbolRef, RefState)
+REGISTER_MAP_WITH_PROGRAMSTATE(ReallocPairs, SymbolRef, ReallocPair)
+
+// A map from the freed symbol to the symbol representing the return value of 
+// the free function.
+REGISTER_MAP_WITH_PROGRAMSTATE(FreeReturnValue, SymbolRef, SymbolRef)
+
+namespace {
+class StopTrackingCallback : public SymbolVisitor {
+  ProgramStateRef state;
+public:
+  StopTrackingCallback(ProgramStateRef st) : state(st) {}
+  ProgramStateRef getState() const { return state; }
+
+  bool VisitSymbol(SymbolRef sym) {
+    state = state->remove<RegionState>(sym);
+    return true;
+  }
+};
+} // end anonymous namespace
+
+void MallocChecker::initIdentifierInfo(ASTContext &Ctx) const {
+  if (II_malloc)
+    return;
+  II_malloc = &Ctx.Idents.get("malloc");
+  II_free = &Ctx.Idents.get("free");
+  II_realloc = &Ctx.Idents.get("realloc");
+  II_reallocf = &Ctx.Idents.get("reallocf");
+  II_calloc = &Ctx.Idents.get("calloc");
+  II_valloc = &Ctx.Idents.get("valloc");
+  II_strdup = &Ctx.Idents.get("strdup");
+  II_strndup = &Ctx.Idents.get("strndup");
+}
+
+bool MallocChecker::isMemFunction(const FunctionDecl *FD, ASTContext &C) const {
+  if (isFreeFunction(FD, C))
+    return true;
+
+  if (isAllocationFunction(FD, C))
+    return true;
+
+  if (isStandardNewDelete(FD, C))
+    return true;
+
+  return false;
+}
+
+bool MallocChecker::isAllocationFunction(const FunctionDecl *FD,
+                                         ASTContext &C) const {
+  if (!FD)
+    return false;
+
+  if (FD->getKind() == Decl::Function) {
+    IdentifierInfo *FunI = FD->getIdentifier();
+    initIdentifierInfo(C);
+
+    if (FunI == II_malloc || FunI == II_realloc ||
+        FunI == II_reallocf || FunI == II_calloc || FunI == II_valloc ||
+        FunI == II_strdup || FunI == II_strndup)
+      return true;
+  }
+
+  if (Filter.CMallocOptimistic && FD->hasAttrs())
+    for (specific_attr_iterator<OwnershipAttr>
+           i = FD->specific_attr_begin<OwnershipAttr>(),
+           e = FD->specific_attr_end<OwnershipAttr>();
+           i != e; ++i)
+      if ((*i)->getOwnKind() == OwnershipAttr::Returns)
+        return true;
+  return false;
+}
+
+bool MallocChecker::isFreeFunction(const FunctionDecl *FD, ASTContext &C) const {
+  if (!FD)
+    return false;
+
+  if (FD->getKind() == Decl::Function) {
+    IdentifierInfo *FunI = FD->getIdentifier();
+    initIdentifierInfo(C);
+
+    if (FunI == II_free || FunI == II_realloc || FunI == II_reallocf)
+      return true;
+  }
+
+  if (Filter.CMallocOptimistic && FD->hasAttrs())
+    for (specific_attr_iterator<OwnershipAttr>
+           i = FD->specific_attr_begin<OwnershipAttr>(),
+           e = FD->specific_attr_end<OwnershipAttr>();
+           i != e; ++i)
+      if ((*i)->getOwnKind() == OwnershipAttr::Takes ||
+          (*i)->getOwnKind() == OwnershipAttr::Holds)
+        return true;
+  return false;
+}
+
+// Tells if the callee is one of the following:
+// 1) A global non-placement new/delete operator function.
+// 2) A global placement operator function with the single placement argument
+//    of type std::nothrow_t.
+bool MallocChecker::isStandardNewDelete(const FunctionDecl *FD,
+                                        ASTContext &C) const {
+  if (!FD)
+    return false;
+
+  OverloadedOperatorKind Kind = FD->getOverloadedOperator();
+  if (Kind != OO_New && Kind != OO_Array_New && 
+      Kind != OO_Delete && Kind != OO_Array_Delete)
+    return false;
+
+  // Skip all operator new/delete methods.
+  if (isa<CXXMethodDecl>(FD))
+    return false;
+
+  // Return true if tested operator is a standard placement nothrow operator.
+  if (FD->getNumParams() == 2) {
+    QualType T = FD->getParamDecl(1)->getType();
+    if (const IdentifierInfo *II = T.getBaseTypeIdentifier())
+      return II->getName().equals("nothrow_t");
+  }
+
+  // Skip placement operators.
+  if (FD->getNumParams() != 1 || FD->isVariadic())
+    return false;
+
+  // One of the standard new/new[]/delete/delete[] non-placement operators.
+  return true;
+}
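+// Example (hypothetical, for illustration): declarations matched and skipped
+// by isStandardNewDelete() above:
+//
+//   void *operator new(std::size_t);                          // matched
+//   void *operator new[](std::size_t, const std::nothrow_t&); // matched
+//   void *operator new(std::size_t, void *place);             // placement: skipped
+//   struct S { void *operator new(std::size_t); };            // method: skipped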
+
+void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
+  if (C.wasInlined)
+    return;
+  
+  const FunctionDecl *FD = C.getCalleeDecl(CE);
+  if (!FD)
+    return;
+
+  ProgramStateRef State = C.getState();
+  bool ReleasedAllocatedMemory = false;
+
+  if (FD->getKind() == Decl::Function) {
+    initIdentifierInfo(C.getASTContext());
+    IdentifierInfo *FunI = FD->getIdentifier();
+
+    if (FunI == II_malloc || FunI == II_valloc) {
+      if (CE->getNumArgs() < 1)
+        return;
+      State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
+    } else if (FunI == II_realloc) {
+      State = ReallocMem(C, CE, false);
+    } else if (FunI == II_reallocf) {
+      State = ReallocMem(C, CE, true);
+    } else if (FunI == II_calloc) {
+      State = CallocMem(C, CE);
+    } else if (FunI == II_free) {
+      State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory);
+    } else if (FunI == II_strdup) {
+      State = MallocUpdateRefState(C, CE, State);
+    } else if (FunI == II_strndup) {
+      State = MallocUpdateRefState(C, CE, State);
+    }
+    else if (isStandardNewDelete(FD, C.getASTContext())) {
+      // Process direct calls to operator new/new[]/delete/delete[] functions
+      // as distinct from new/new[]/delete/delete[] expressions, which are
+      // handled by the checkPostStmt callback for CXXNewExpr and the
+      // checkPreStmt callback for CXXDeleteExpr.
+      OverloadedOperatorKind K = FD->getOverloadedOperator();
+      if (K == OO_New)
+        State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
+                             AF_CXXNew);
+      else if (K == OO_Array_New)
+        State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
+                             AF_CXXNewArray);
+      else if (K == OO_Delete || K == OO_Array_Delete)
+        State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory);
+      else
+        llvm_unreachable("not a new/delete operator");
+    }
+  }
+
+  if (Filter.CMallocOptimistic || Filter.CMismatchedDeallocatorChecker) {
+    // Check all the attributes, if there are any.
+    // There can be multiple of these attributes.
+    if (FD->hasAttrs())
+      for (specific_attr_iterator<OwnershipAttr>
+          i = FD->specific_attr_begin<OwnershipAttr>(),
+          e = FD->specific_attr_end<OwnershipAttr>();
+          i != e; ++i) {
+        switch ((*i)->getOwnKind()) {
+        case OwnershipAttr::Returns:
+          State = MallocMemReturnsAttr(C, CE, *i);
+          break;
+        case OwnershipAttr::Takes:
+        case OwnershipAttr::Holds:
+          State = FreeMemAttr(C, CE, *i);
+          break;
+        }
+      }
+  }
+  C.addTransition(State);
+}
+
+void MallocChecker::checkPostStmt(const CXXNewExpr *NE, 
+                                  CheckerContext &C) const {
+
+  if (NE->getNumPlacementArgs())
+    for (CXXNewExpr::const_arg_iterator I = NE->placement_arg_begin(),
+         E = NE->placement_arg_end(); I != E; ++I)
+      if (SymbolRef Sym = C.getSVal(*I).getAsSymbol())
+        checkUseAfterFree(Sym, C, *I);
+
+  if (!isStandardNewDelete(NE->getOperatorNew(), C.getASTContext()))
+    return;
+
+  ProgramStateRef State = C.getState();
+  // The return value from operator new is bound to a specified initialization
+  // value (if any) and we don't want to lose this value. So we call
+  // MallocUpdateRefState() instead of MallocMemAux(), which would break the
+  // existing binding.
+  State = MallocUpdateRefState(C, NE, State, NE->isArray() ? AF_CXXNewArray 
+                                                           : AF_CXXNew);
+  C.addTransition(State);
+}
+
+void MallocChecker::checkPreStmt(const CXXDeleteExpr *DE, 
+                                 CheckerContext &C) const {
+
+  if (!Filter.CNewDeleteChecker)
+    if (SymbolRef Sym = C.getSVal(DE->getArgument()).getAsSymbol())
+      checkUseAfterFree(Sym, C, DE->getArgument());
+
+  if (!isStandardNewDelete(DE->getOperatorDelete(), C.getASTContext()))
+    return;
+
+  ProgramStateRef State = C.getState();
+  bool ReleasedAllocated;
+  State = FreeMemAux(C, DE->getArgument(), DE, State,
+                     /*Hold*/false, ReleasedAllocated);
+
+  C.addTransition(State);
+}
+
+static bool isKnownDeallocObjCMethodName(const ObjCMethodCall &Call) {
+  // If the first selector piece is one of the names below, assume that the
+  // object takes ownership of the memory, promising to eventually deallocate it
+  // with free().
+  // Ex:  [NSData dataWithBytesNoCopy:bytes length:10];
+  // (...unless a 'freeWhenDone' parameter is false, but that's checked later.)
+  StringRef FirstSlot = Call.getSelector().getNameForSlot(0);
+  if (FirstSlot == "dataWithBytesNoCopy" ||
+      FirstSlot == "initWithBytesNoCopy" ||
+      FirstSlot == "initWithCharactersNoCopy")
+    return true;
+
+  return false;
+}
+
+static Optional<bool> getFreeWhenDoneArg(const ObjCMethodCall &Call) {
+  Selector S = Call.getSelector();
+
+  // FIXME: We should not rely on fully-constrained symbols being folded.
+  for (unsigned i = 1; i < S.getNumArgs(); ++i)
+    if (S.getNameForSlot(i).equals("freeWhenDone"))
+      return !Call.getArgSVal(i).isZeroConstant();
+
+  return None;
+}
+
+void MallocChecker::checkPostObjCMessage(const ObjCMethodCall &Call,
+                                         CheckerContext &C) const {
+  if (C.wasInlined)
+    return;
+
+  if (!isKnownDeallocObjCMethodName(Call))
+    return;
+
+  if (Optional<bool> FreeWhenDone = getFreeWhenDoneArg(Call))
+    if (!*FreeWhenDone)
+      return;
+
+  bool ReleasedAllocatedMemory;
+  ProgramStateRef State = FreeMemAux(C, Call.getArgExpr(0),
+                                     Call.getOriginExpr(), C.getState(),
+                                     /*Hold=*/true, ReleasedAllocatedMemory,
+                                     /*RetNullOnFailure=*/true);
+
+  C.addTransition(State);
+}
+
+ProgramStateRef MallocChecker::MallocMemReturnsAttr(CheckerContext &C,
+                                                    const CallExpr *CE,
+                                                    const OwnershipAttr* Att) {
+  if (Att->getModule() != "malloc")
+    return 0;
+
+  OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
+  if (I != E) {
+    return MallocMemAux(C, CE, CE->getArg(*I), UndefinedVal(), C.getState());
+  }
+  return MallocMemAux(C, CE, UnknownVal(), UndefinedVal(), C.getState());
+}
+
+ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
+                                           const CallExpr *CE,
+                                           SVal Size, SVal Init,
+                                           ProgramStateRef State,
+                                           AllocationFamily Family) {
+
+  // Bind the return value to the symbolic value from the heap region.
+  // TODO: We could rewrite post visit to eval call; 'malloc' does not have
+  // side effects other than what we model here.
+  unsigned Count = C.blockCount();
+  SValBuilder &svalBuilder = C.getSValBuilder();
+  const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
+  DefinedSVal RetVal = svalBuilder.getConjuredHeapSymbolVal(CE, LCtx, Count)
+      .castAs<DefinedSVal>();
+  State = State->BindExpr(CE, C.getLocationContext(), RetVal);
+
+  // We expect the malloc functions to return a pointer.
+  if (!RetVal.getAs<Loc>())
+    return 0;
+
+  // Fill the region with the initialization value.
+  State = State->bindDefault(RetVal, Init);
+
+  // Set the region's extent equal to the Size parameter.
+  const SymbolicRegion *R =
+      dyn_cast_or_null<SymbolicRegion>(RetVal.getAsRegion());
+  if (!R)
+    return 0;
+  if (Optional<DefinedOrUnknownSVal> DefinedSize =
+          Size.getAs<DefinedOrUnknownSVal>()) {
+    SValBuilder &svalBuilder = C.getSValBuilder();
+    DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
+    DefinedOrUnknownSVal extentMatchesSize =
+        svalBuilder.evalEQ(State, Extent, *DefinedSize);
+
+    State = State->assume(extentMatchesSize, true);
+    assert(State);
+  }
+  
+  return MallocUpdateRefState(C, CE, State, Family);
+}
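+// Example (hypothetical, for illustration): after modeling
+//
+//   char *buf = (char *)malloc(n);
+//
+// MallocMemAux() binds 'buf' to a conjured heap symbol whose region extent is
+// constrained to equal 'n', then marks the symbol Allocated via
+// MallocUpdateRefState().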
+
+ProgramStateRef MallocChecker::MallocUpdateRefState(CheckerContext &C,
+                                                    const Expr *E,
+                                                    ProgramStateRef State,
+                                                    AllocationFamily Family) {
+  // Get the return value.
+  SVal retVal = State->getSVal(E, C.getLocationContext());
+
+  // We expect the malloc functions to return a pointer.
+  if (!retVal.getAs<Loc>())
+    return 0;
+
+  SymbolRef Sym = retVal.getAsLocSymbol();
+  assert(Sym);
+
+  // Set the symbol's state to Allocated.
+  return State->set<RegionState>(Sym, RefState::getAllocated(Family, E));
+}
+
+ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C,
+                                           const CallExpr *CE,
+                                           const OwnershipAttr* Att) const {
+  if (Att->getModule() != "malloc")
+    return 0;
+
+  ProgramStateRef State = C.getState();
+  bool ReleasedAllocated = false;
+
+  for (OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
+       I != E; ++I) {
+    ProgramStateRef StateI = FreeMemAux(C, CE, State, *I,
+                               Att->getOwnKind() == OwnershipAttr::Holds,
+                               ReleasedAllocated);
+    if (StateI)
+      State = StateI;
+  }
+  return State;
+}
+
+ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
+                                          const CallExpr *CE,
+                                          ProgramStateRef state,
+                                          unsigned Num,
+                                          bool Hold,
+                                          bool &ReleasedAllocated,
+                                          bool ReturnsNullOnFailure) const {
+  if (CE->getNumArgs() < (Num + 1))
+    return 0;
+
+  return FreeMemAux(C, CE->getArg(Num), CE, state, Hold,
+                    ReleasedAllocated, ReturnsNullOnFailure);
+}
+
+/// Checks whether the previous call to free on the given symbol failed;
+/// returns true if it did. Also returns the corresponding return-value symbol.
+static bool didPreviousFreeFail(ProgramStateRef State,
+                                SymbolRef Sym, SymbolRef &RetStatusSymbol) {
+  const SymbolRef *Ret = State->get<FreeReturnValue>(Sym);
+  if (Ret) {
+    assert(*Ret && "We should not store the null return symbol");
+    ConstraintManager &CMgr = State->getConstraintManager();
+    ConditionTruthVal FreeFailed = CMgr.isNull(State, *Ret);
+    RetStatusSymbol = *Ret;
+    return FreeFailed.isConstrainedTrue();
+  }
+  return false;
+}
+
+AllocationFamily MallocChecker::getAllocationFamily(CheckerContext &C, 
+                                                    const Stmt *S) const {
+  if (!S)
+    return AF_None;
+
+  if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+    const FunctionDecl *FD = C.getCalleeDecl(CE);
+
+    if (!FD)
+      FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
+
+    ASTContext &Ctx = C.getASTContext();
+
+    if (isAllocationFunction(FD, Ctx) || isFreeFunction(FD, Ctx))
+      return AF_Malloc;
+
+    if (isStandardNewDelete(FD, Ctx)) {
+      OverloadedOperatorKind Kind = FD->getOverloadedOperator();
+      if (Kind == OO_New || Kind == OO_Delete)
+        return AF_CXXNew;
+      else if (Kind == OO_Array_New || Kind == OO_Array_Delete)
+        return AF_CXXNewArray;
+    }
+
+    return AF_None;
+  }
+
+  if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(S))
+    return NE->isArray() ? AF_CXXNewArray : AF_CXXNew;
+
+  if (const CXXDeleteExpr *DE = dyn_cast<CXXDeleteExpr>(S))
+    return DE->isArrayForm() ? AF_CXXNewArray : AF_CXXNew;
+
+  if (isa<ObjCMessageExpr>(S))
+    return AF_Malloc;
+
+  return AF_None;
+}
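+// Example (hypothetical, for illustration): getAllocationFamily() maps
+// allocation/deallocation sites to families roughly as follows:
+//
+//   malloc(n) / free(p)            -> AF_Malloc
+//   new T / delete p               -> AF_CXXNew
+//   new T[n] / delete [] p         -> AF_CXXNewArray
+//   Objective-C message (e.g. dataWithBytesNoCopy:) -> AF_Malloc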
+
+bool MallocChecker::printAllocDeallocName(raw_ostream &os, CheckerContext &C, 
+                                          const Expr *E) const {
+  if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+    // FIXME: This doesn't handle indirect calls.
+    const FunctionDecl *FD = CE->getDirectCallee();
+    if (!FD)
+      return false;
+    
+    os << *FD;
+    if (!FD->isOverloadedOperator())
+      os << "()";
+    return true;
+  }
+
+  if (const ObjCMessageExpr *Msg = dyn_cast<ObjCMessageExpr>(E)) {
+    if (Msg->isInstanceMessage())
+      os << "-";
+    else
+      os << "+";
+    os << Msg->getSelector().getAsString();
+    return true;
+  }
+
+  if (const CXXNewExpr *NE = dyn_cast<CXXNewExpr>(E)) {
+    os << "'" 
+       << getOperatorSpelling(NE->getOperatorNew()->getOverloadedOperator())
+       << "'";
+    return true;
+  }
+
+  if (const CXXDeleteExpr *DE = dyn_cast<CXXDeleteExpr>(E)) {
+    os << "'" 
+       << getOperatorSpelling(DE->getOperatorDelete()->getOverloadedOperator())
+       << "'";
+    return true;
+  }
+
+  return false;
+}
+
+void MallocChecker::printExpectedAllocName(raw_ostream &os, CheckerContext &C,
+                                           const Expr *E) const {
+  AllocationFamily Family = getAllocationFamily(C, E);
+
+  switch(Family) {
+    case AF_Malloc: os << "malloc()"; return;
+    case AF_CXXNew: os << "'new'"; return;
+    case AF_CXXNewArray: os << "'new[]'"; return;
+    case AF_None: llvm_unreachable("not a deallocation expression");
+  }
+}
+
+void MallocChecker::printExpectedDeallocName(raw_ostream &os, 
+                                             AllocationFamily Family) const {
+  switch(Family) {
+    case AF_Malloc: os << "free()"; return;
+    case AF_CXXNew: os << "'delete'"; return;
+    case AF_CXXNewArray: os << "'delete[]'"; return;
+    case AF_None: llvm_unreachable("suspicious AF_None argument");
+  }
+}
+
+ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
+                                          const Expr *ArgExpr,
+                                          const Expr *ParentExpr,
+                                          ProgramStateRef State,
+                                          bool Hold,
+                                          bool &ReleasedAllocated,
+                                          bool ReturnsNullOnFailure) const {
+
+  SVal ArgVal = State->getSVal(ArgExpr, C.getLocationContext());
+  if (!ArgVal.getAs<DefinedOrUnknownSVal>())
+    return 0;
+  DefinedOrUnknownSVal location = ArgVal.castAs<DefinedOrUnknownSVal>();
+
+  // Check for null dereferences.
+  if (!location.getAs<Loc>())
+    return 0;
+
+  // The explicit NULL case: freeing a null pointer is a no-op.
+  ProgramStateRef notNullState, nullState;
+  llvm::tie(notNullState, nullState) = State->assume(location);
+  if (nullState && !notNullState)
+    return 0;
+
+  // Unknown values could easily be okay
+  // Undefined values are handled elsewhere
+  if (ArgVal.isUnknownOrUndef())
+    return 0;
+
+  const MemRegion *R = ArgVal.getAsRegion();
+  
+  // Nonlocs can't be freed, of course.
+  // Non-region locations (labels and fixed addresses) also shouldn't be freed.
+  if (!R) {
+    ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
+    return 0;
+  }
+  
+  R = R->StripCasts();
+  
+  // Blocks might show up as heap data, but should not be free()d
+  if (isa<BlockDataRegion>(R)) {
+    ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
+    return 0;
+  }
+  
+  const MemSpaceRegion *MS = R->getMemorySpace();
+  
+  // Parameters, locals, statics, globals, and memory returned by alloca() 
+  // shouldn't be freed.
+  if (!(isa<UnknownSpaceRegion>(MS) || isa<HeapSpaceRegion>(MS))) {
+    // FIXME: at the time this code was written, malloc() regions were
+    // represented by conjured symbols, which are all in UnknownSpaceRegion.
+    // This means that there isn't actually anything from HeapSpaceRegion
+    // that should be freed, even though we allow it here.
+    // Of course, free() can work on memory allocated outside the current
+    // function, so UnknownSpaceRegion is always a possibility.
+    // False negatives are better than false positives.
+    
+    ReportBadFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr);
+    return 0;
+  }
+
+  const SymbolicRegion *SrBase = dyn_cast<SymbolicRegion>(R->getBaseRegion());
+  // Various cases could lead to non-symbol values here.
+  // For now, ignore them.
+  if (!SrBase)
+    return 0;
+
+  SymbolRef SymBase = SrBase->getSymbol();
+  const RefState *RsBase = State->get<RegionState>(SymBase);
+  SymbolRef PreviousRetStatusSymbol = 0;
+
+  if (RsBase) {
+
+    // Check for double free first.
+    if ((RsBase->isReleased() || RsBase->isRelinquished()) &&
+        !didPreviousFreeFail(State, SymBase, PreviousRetStatusSymbol)) {
+      ReportDoubleFree(C, ParentExpr->getSourceRange(), RsBase->isReleased(),
+                       SymBase, PreviousRetStatusSymbol);
+      return 0;
+
+    // If the pointer is allocated or escaped, but we are now trying to free it,
+    // check that the call to free is proper.
+    } else if (RsBase->isAllocated() || RsBase->isEscaped()) {
+
+      // Check if an expected deallocation function matches the real one.
+      bool DeallocMatchesAlloc =
+        RsBase->getAllocationFamily() == getAllocationFamily(C, ParentExpr);
+      if (!DeallocMatchesAlloc) {
+        ReportMismatchedDealloc(C, ArgExpr->getSourceRange(),
+                                ParentExpr, RsBase, SymBase);
+        return 0;
+      }
+
+      // Check if the memory location being freed is the actual location
+      // allocated, or an offset.
+      RegionOffset Offset = R->getAsOffset();
+      if (Offset.isValid() &&
+          !Offset.hasSymbolicOffset() &&
+          Offset.getOffset() != 0) {
+        const Expr *AllocExpr = cast<Expr>(RsBase->getStmt());
+        ReportOffsetFree(C, ArgVal, ArgExpr->getSourceRange(), ParentExpr, 
+                         AllocExpr);
+        return 0;
+      }
+    }
+  }
+
+  ReleasedAllocated = (RsBase != 0);
+
+  // Clear out any info about the return value of a previous call to free.
+  State = State->remove<FreeReturnValue>(SymBase);
+
+  // Keep track of the return value. If it is NULL, we will know that free 
+  // failed.
+  if (ReturnsNullOnFailure) {
+    SVal RetVal = C.getSVal(ParentExpr);
+    SymbolRef RetStatusSymbol = RetVal.getAsSymbol();
+    if (RetStatusSymbol) {
+      C.getSymbolManager().addSymbolDependency(SymBase, RetStatusSymbol);
+      State = State->set<FreeReturnValue>(SymBase, RetStatusSymbol);
+    }
+  }
+
+  AllocationFamily Family = RsBase ? RsBase->getAllocationFamily()
+                                   : getAllocationFamily(C, ParentExpr);
+  // If the callee merely takes ownership (Hold), mark the symbol relinquished;
+  // otherwise this is a normal free and the symbol is released.
+  if (Hold)
+    return State->set<RegionState>(SymBase,
+                                   RefState::getRelinquished(Family,
+                                                             ParentExpr));
+
+  return State->set<RegionState>(SymBase,
+                                 RefState::getReleased(Family, ParentExpr));
+}
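+// Example (hypothetical, for illustration): the checks in FreeMemAux() above
+// cover misuse such as:
+//
+//   char buf[8]; free(buf);                    // bad free (stack memory)
+//   int *p = new int; free(p);                 // mismatched deallocator
+//   char *q = (char *)malloc(8); free(q + 1);  // offset free
+//   free(q); free(q);                          // double free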
+
+bool MallocChecker::isTrackedByCurrentChecker(AllocationFamily Family) const {
+  switch (Family) {
+  case AF_Malloc: {
+    if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic)
+      return false;
+    return true;
+  }
+  case AF_CXXNew:
+  case AF_CXXNewArray: {
+    if (!Filter.CNewDeleteChecker)
+      return false;
+    return true;
+  }
+  case AF_None: {
+    llvm_unreachable("no family");
+  }
+  }
+  llvm_unreachable("unhandled family");
+}
+
+bool
+MallocChecker::isTrackedByCurrentChecker(CheckerContext &C, 
+                                         const Stmt *AllocDeallocStmt) const {
+  return isTrackedByCurrentChecker(getAllocationFamily(C, AllocDeallocStmt));
+}
+
+bool MallocChecker::isTrackedByCurrentChecker(CheckerContext &C,
+                                              SymbolRef Sym) const {
+
+  const RefState *RS = C.getState()->get<RegionState>(Sym);
+  assert(RS);
+  return isTrackedByCurrentChecker(RS->getAllocationFamily());
+}
+
+bool MallocChecker::SummarizeValue(raw_ostream &os, SVal V) {
+  if (Optional<nonloc::ConcreteInt> IntVal = V.getAs<nonloc::ConcreteInt>())
+    os << "an integer (" << IntVal->getValue() << ")";
+  else if (Optional<loc::ConcreteInt> ConstAddr = V.getAs<loc::ConcreteInt>())
+    os << "a constant address (" << ConstAddr->getValue() << ")";
+  else if (Optional<loc::GotoLabel> Label = V.getAs<loc::GotoLabel>())
+    os << "the address of the label '" << Label->getLabel()->getName() << "'";
+  else
+    return false;
+  
+  return true;
+}
+
+bool MallocChecker::SummarizeRegion(raw_ostream &os,
+                                    const MemRegion *MR) {
+  switch (MR->getKind()) {
+  case MemRegion::FunctionTextRegionKind: {
+    const NamedDecl *FD = cast<FunctionTextRegion>(MR)->getDecl();
+    if (FD)
+      os << "the address of the function '" << *FD << '\'';
+    else
+      os << "the address of a function";
+    return true;
+  }
+  case MemRegion::BlockTextRegionKind:
+    os << "block text";
+    return true;
+  case MemRegion::BlockDataRegionKind:
+    // FIXME: where did the block come from?
+    os << "a block";
+    return true;
+  default: {
+    const MemSpaceRegion *MS = MR->getMemorySpace();
+    
+    if (isa<StackLocalsSpaceRegion>(MS)) {
+      const VarRegion *VR = dyn_cast<VarRegion>(MR);
+      const VarDecl *VD;
+      if (VR)
+        VD = VR->getDecl();
+      else
+        VD = NULL;
+      
+      if (VD)
+        os << "the address of the local variable '" << VD->getName() << "'";
+      else
+        os << "the address of a local stack variable";
+      return true;
+    }
+
+    if (isa<StackArgumentsSpaceRegion>(MS)) {
+      const VarRegion *VR = dyn_cast<VarRegion>(MR);
+      const VarDecl *VD;
+      if (VR)
+        VD = VR->getDecl();
+      else
+        VD = NULL;
+      
+      if (VD)
+        os << "the address of the parameter '" << VD->getName() << "'";
+      else
+        os << "the address of a parameter";
+      return true;
+    }
+
+    if (isa<GlobalsSpaceRegion>(MS)) {
+      const VarRegion *VR = dyn_cast<VarRegion>(MR);
+      const VarDecl *VD;
+      if (VR)
+        VD = VR->getDecl();
+      else
+        VD = NULL;
+      
+      if (VD) {
+        if (VD->isStaticLocal())
+          os << "the address of the static variable '" << VD->getName() << "'";
+        else
+          os << "the address of the global variable '" << VD->getName() << "'";
+      } else
+        os << "the address of a global variable";
+      return true;
+    }
+
+    return false;
+  }
+  }
+}
+
+void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal, 
+                                  SourceRange Range, 
+                                  const Expr *DeallocExpr) const {
+
+  if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic && 
+      !Filter.CNewDeleteChecker)
+    return;
+
+  if (!isTrackedByCurrentChecker(C, DeallocExpr))
+    return;
+
+  if (ExplodedNode *N = C.generateSink()) {
+    if (!BT_BadFree)
+      BT_BadFree.reset(new BugType("Bad free", "Memory Error"));
+    
+    SmallString<100> buf;
+    llvm::raw_svector_ostream os(buf);
+
+    const MemRegion *MR = ArgVal.getAsRegion();
+    while (const ElementRegion *ER = dyn_cast_or_null<ElementRegion>(MR))
+      MR = ER->getSuperRegion();
+
+    if (MR && isa<AllocaRegion>(MR))
+      os << "Memory allocated by alloca() should not be deallocated";
+    else {
+      os << "Argument to ";
+      if (!printAllocDeallocName(os, C, DeallocExpr))
+        os << "deallocator";
+
+      os << " is ";
+      bool Summarized = MR ? SummarizeRegion(os, MR) 
+                           : SummarizeValue(os, ArgVal);
+      if (Summarized)
+        os << ", which is not memory allocated by ";
+      else
+        os << "not memory allocated by ";
+
+      printExpectedAllocName(os, C, DeallocExpr);
+    }
+
+    BugReport *R = new BugReport(*BT_BadFree, os.str(), N);
+    R->markInteresting(MR);
+    R->addRange(Range);
+    C.emitReport(R);
+  }
+}
+
+void MallocChecker::ReportMismatchedDealloc(CheckerContext &C, 
+                                            SourceRange Range,
+                                            const Expr *DeallocExpr, 
+                                            const RefState *RS,
+                                            SymbolRef Sym) const {
+
+  if (!Filter.CMismatchedDeallocatorChecker)
+    return;
+
+  if (ExplodedNode *N = C.generateSink()) {
+    if (!BT_MismatchedDealloc)
+      BT_MismatchedDealloc.reset(new BugType("Bad deallocator",
+                                             "Memory Error"));
+    
+    SmallString<100> buf;
+    llvm::raw_svector_ostream os(buf);
+
+    const Expr *AllocExpr = cast<Expr>(RS->getStmt());
+    SmallString<20> AllocBuf;
+    llvm::raw_svector_ostream AllocOs(AllocBuf);
+    SmallString<20> DeallocBuf;
+    llvm::raw_svector_ostream DeallocOs(DeallocBuf);
+
+    os << "Memory";
+    if (printAllocDeallocName(AllocOs, C, AllocExpr))
+      os << " allocated by " << AllocOs.str();
+
+    os << " should be deallocated by ";
+      printExpectedDeallocName(os, RS->getAllocationFamily());
+
+    if (printAllocDeallocName(DeallocOs, C, DeallocExpr))
+      os << ", not " << DeallocOs.str();
+
+    BugReport *R = new BugReport(*BT_MismatchedDealloc, os.str(), N);
+    R->markInteresting(Sym);
+    R->addRange(Range);
+    R->addVisitor(new MallocBugVisitor(Sym));
+    C.emitReport(R);
+  }
+}
+
+void MallocChecker::ReportOffsetFree(CheckerContext &C, SVal ArgVal,
+                                     SourceRange Range, const Expr *DeallocExpr,
+                                     const Expr *AllocExpr) const {
+
+  if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic && 
+      !Filter.CNewDeleteChecker)
+    return;
+
+  if (!isTrackedByCurrentChecker(C, AllocExpr))
+    return;
+
+  ExplodedNode *N = C.generateSink();
+  if (N == NULL)
+    return;
+
+  if (!BT_OffsetFree)
+    BT_OffsetFree.reset(new BugType("Offset free", "Memory Error"));
+
+  SmallString<100> buf;
+  llvm::raw_svector_ostream os(buf);
+  SmallString<20> AllocNameBuf;
+  llvm::raw_svector_ostream AllocNameOs(AllocNameBuf);
+
+  const MemRegion *MR = ArgVal.getAsRegion();
+  assert(MR && "Only MemRegion based symbols can have offset free errors");
+
+  RegionOffset Offset = MR->getAsOffset();
+  assert((Offset.isValid() &&
+          !Offset.hasSymbolicOffset() &&
+          Offset.getOffset() != 0) &&
+         "Only symbols with a valid offset can have offset free errors");
+
+  int offsetBytes = Offset.getOffset() / C.getASTContext().getCharWidth();
+
+  os << "Argument to ";
+  if (!printAllocDeallocName(os, C, DeallocExpr))
+    os << "deallocator";
+  os << " is offset by "
+     << offsetBytes
+     << " "
+     << ((abs(offsetBytes) > 1) ? "bytes" : "byte")
+     << " from the start of ";
+  if (AllocExpr && printAllocDeallocName(AllocNameOs, C, AllocExpr))
+    os << "memory allocated by " << AllocNameOs.str();
+  else
+    os << "allocated memory";
+
+  BugReport *R = new BugReport(*BT_OffsetFree, os.str(), N);
+  R->markInteresting(MR->getBaseRegion());
+  R->addRange(Range);
+  C.emitReport(R);
+}
+
+void MallocChecker::ReportUseAfterFree(CheckerContext &C, SourceRange Range,
+                                       SymbolRef Sym) const {
+
+  if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic && 
+      !Filter.CNewDeleteChecker)
+    return;
+
+  if (!isTrackedByCurrentChecker(C, Sym))
+    return;
+
+  if (ExplodedNode *N = C.generateSink()) {
+    if (!BT_UseFree)
+      BT_UseFree.reset(new BugType("Use-after-free", "Memory Error"));
+
+    BugReport *R = new BugReport(*BT_UseFree,
+                                 "Use of memory after it is freed", N);
+
+    R->markInteresting(Sym);
+    R->addRange(Range);
+    R->addVisitor(new MallocBugVisitor(Sym));
+    C.emitReport(R);
+  }
+}
+
+void MallocChecker::ReportDoubleFree(CheckerContext &C, SourceRange Range,
+                                     bool Released, SymbolRef Sym, 
+                                     SymbolRef PrevSym) const {
+
+  if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic && 
+      !Filter.CNewDeleteChecker)
+    return;
+
+  if (!isTrackedByCurrentChecker(C, Sym))
+    return;
+
+  if (ExplodedNode *N = C.generateSink()) {
+    if (!BT_DoubleFree)
+      BT_DoubleFree.reset(new BugType("Double free", "Memory Error"));
+
+    BugReport *R = new BugReport(*BT_DoubleFree,
+      (Released ? "Attempt to free released memory"
+                : "Attempt to free non-owned memory"),
+      N);
+    R->addRange(Range);
+    R->markInteresting(Sym);
+    if (PrevSym)
+      R->markInteresting(PrevSym);
+    R->addVisitor(new MallocBugVisitor(Sym));
+    C.emitReport(R);
+  }
+}
+
+ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
+                                          const CallExpr *CE,
+                                          bool FreesOnFail) const {
+  if (CE->getNumArgs() < 2)
+    return 0;
+
+  ProgramStateRef state = C.getState();
+  const Expr *arg0Expr = CE->getArg(0);
+  const LocationContext *LCtx = C.getLocationContext();
+  SVal Arg0Val = state->getSVal(arg0Expr, LCtx);
+  if (!Arg0Val.getAs<DefinedOrUnknownSVal>())
+    return 0;
+  DefinedOrUnknownSVal arg0Val = Arg0Val.castAs<DefinedOrUnknownSVal>();
+
+  SValBuilder &svalBuilder = C.getSValBuilder();
+
+  DefinedOrUnknownSVal PtrEQ =
+    svalBuilder.evalEQ(state, arg0Val, svalBuilder.makeNull());
+
+  // Get the size argument. If there is no size arg then give up.
+  const Expr *Arg1 = CE->getArg(1);
+  if (!Arg1)
+    return 0;
+
+  // Get the value of the size argument.
+  SVal Arg1ValG = state->getSVal(Arg1, LCtx);
+  if (!Arg1ValG.getAs<DefinedOrUnknownSVal>())
+    return 0;
+  DefinedOrUnknownSVal Arg1Val = Arg1ValG.castAs<DefinedOrUnknownSVal>();
+
+  // Compare the size argument to 0.
+  DefinedOrUnknownSVal SizeZero =
+    svalBuilder.evalEQ(state, Arg1Val,
+                       svalBuilder.makeIntValWithPtrWidth(0, false));
+
+  ProgramStateRef StatePtrIsNull, StatePtrNotNull;
+  llvm::tie(StatePtrIsNull, StatePtrNotNull) = state->assume(PtrEQ);
+  ProgramStateRef StateSizeIsZero, StateSizeNotZero;
+  llvm::tie(StateSizeIsZero, StateSizeNotZero) = state->assume(SizeZero);
+  // We only assume exceptional states if they are definitely true; if the
+  // state is under-constrained, assume regular realloc behavior.
+  bool PtrIsNull = StatePtrIsNull && !StatePtrNotNull;
+  bool SizeIsZero = StateSizeIsZero && !StateSizeNotZero;
+
+  // If the ptr is NULL and the size is not 0, the call is equivalent to 
+  // malloc(size).
+  if (PtrIsNull && !SizeIsZero) {
+    ProgramStateRef stateMalloc = MallocMemAux(C, CE, CE->getArg(1),
+                                               UndefinedVal(), StatePtrIsNull);
+    return stateMalloc;
+  }
+
+  if (PtrIsNull && SizeIsZero)
+    return 0;
+
+  // Get the from and to pointer symbols as in toPtr = realloc(fromPtr, size).
+  assert(!PtrIsNull);
+  SymbolRef FromPtr = arg0Val.getAsSymbol();
+  SVal RetVal = state->getSVal(CE, LCtx);
+  SymbolRef ToPtr = RetVal.getAsSymbol();
+  if (!FromPtr || !ToPtr)
+    return 0;
+
+  bool ReleasedAllocated = false;
+
+  // If the size is 0, free the memory.
+  if (SizeIsZero)
+    if (ProgramStateRef stateFree = FreeMemAux(C, CE, StateSizeIsZero, 0,
+                                               false, ReleasedAllocated)){
+      // The semantics of the return value are:
+      // If size was equal to 0, either NULL or a pointer suitable to be passed
+      // to free() is returned. We just free the input pointer and do not add
+      // any constraints on the output pointer.
+      return stateFree;
+    }
+
+  // Default behavior.
+  if (ProgramStateRef stateFree =
+        FreeMemAux(C, CE, state, 0, false, ReleasedAllocated)) {
+
+    ProgramStateRef stateRealloc = MallocMemAux(C, CE, CE->getArg(1),
+                                                UnknownVal(), stateFree);
+    if (!stateRealloc)
+      return 0;
+
+    ReallocPairKind Kind = RPToBeFreedAfterFailure;
+    if (FreesOnFail)
+      Kind = RPIsFreeOnFailure;
+    else if (!ReleasedAllocated)
+      Kind = RPDoNotTrackAfterFailure;
+
+    // Record the info about the reallocated symbol so that we could properly
+    // process failed reallocation.
+    stateRealloc = stateRealloc->set<ReallocPairs>(ToPtr,
+                                                   ReallocPair(FromPtr, Kind));
+    // The reallocated symbol should stay alive for as long as the new symbol.
+    C.getSymbolManager().addSymbolDependency(ToPtr, FromPtr);
+    return stateRealloc;
+  }
+  return 0;
+}
+
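+// calloc(count, size) is modeled as a single allocation of count * size bytes
+// whose contents are bound to zero.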
+ProgramStateRef MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE){
+  if (CE->getNumArgs() < 2)
+    return 0;
+
+  ProgramStateRef state = C.getState();
+  SValBuilder &svalBuilder = C.getSValBuilder();
+  const LocationContext *LCtx = C.getLocationContext();
+  SVal count = state->getSVal(CE->getArg(0), LCtx);
+  SVal elementSize = state->getSVal(CE->getArg(1), LCtx);
+  SVal TotalSize = svalBuilder.evalBinOp(state, BO_Mul, count, elementSize,
+                                        svalBuilder.getContext().getSizeType());  
+  SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy);
+
+  return MallocMemAux(C, CE, TotalSize, zeroVal, state);
+}
+
+LeakInfo
+MallocChecker::getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
+                                 CheckerContext &C) const {
+  const LocationContext *LeakContext = N->getLocationContext();
+  // Walk the ExplodedGraph backwards and find the first node that referred to
+  // the tracked symbol.
+  const ExplodedNode *AllocNode = N;
+  const MemRegion *ReferenceRegion = 0;
+
+  while (N) {
+    ProgramStateRef State = N->getState();
+    if (!State->get<RegionState>(Sym))
+      break;
+
+    // Find the most recent expression bound to the symbol in the current
+    // context.
+    if (!ReferenceRegion) {
+      if (const MemRegion *MR = C.getLocationRegionIfPostStore(N)) {
+        SVal Val = State->getSVal(MR);
+        if (Val.getAsLocSymbol() == Sym) {
+          const VarRegion *VR = MR->getBaseRegion()->getAs<VarRegion>();
+          // Do not show local variables belonging to a function other than
+          // where the error is reported.
+          if (!VR ||
+              (VR->getStackFrame() == LeakContext->getCurrentStackFrame()))
+            ReferenceRegion = MR;
+        }
+      }
+    }
+
+    // The allocation node is the last node in the current context in which
+    // the symbol was tracked.
+    if (N->getLocationContext() == LeakContext)
+      AllocNode = N;
+    N = N->pred_empty() ? NULL : *(N->pred_begin());
+  }
+
+  return LeakInfo(AllocNode, ReferenceRegion);
+}
+
+void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
+                               CheckerContext &C) const {
+
+  if (!Filter.CMallocOptimistic && !Filter.CMallocPessimistic && 
+      !Filter.CNewDeleteLeaksChecker)
+    return;
+
+  const RefState *RS = C.getState()->get<RegionState>(Sym);
+  assert(RS && "cannot leak an untracked symbol");
+  AllocationFamily Family = RS->getAllocationFamily();
+  if (!isTrackedByCurrentChecker(Family))
+    return;
+
+  // Special case for new and new[]; these are controlled by a separate checker
+  // flag so that they can be selectively disabled.
+  if (Family == AF_CXXNew || Family == AF_CXXNewArray)
+    if (!Filter.CNewDeleteLeaksChecker)
+      return;
+
+  assert(N);
+  if (!BT_Leak) {
+    BT_Leak.reset(new BugType("Memory leak", "Memory Error"));
+    // Leaks should not be reported if they are post-dominated by a sink:
+    // (1) Sinks are higher importance bugs.
+    // (2) NoReturnFunctionChecker uses sink nodes to represent paths ending
+    //     with __noreturn functions such as assert() or exit(). We choose not
+    //     to report leaks on such paths.
+    BT_Leak->setSuppressOnSink(true);
+  }
+
+  // Most bug reports are cached at the location where they occurred.
+  // With leaks, we want to unique them by the location where they were
+  // allocated, and only report a single path.
+  PathDiagnosticLocation LocUsedForUniqueing;
+  const ExplodedNode *AllocNode = 0;
+  const MemRegion *Region = 0;
+  llvm::tie(AllocNode, Region) = getAllocationSite(N, Sym, C);
+  
+  ProgramPoint P = AllocNode->getLocation();
+  const Stmt *AllocationStmt = 0;
+  if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>())
+    AllocationStmt = Exit->getCalleeContext()->getCallSite();
+  else if (Optional<StmtPoint> SP = P.getAs<StmtPoint>())
+    AllocationStmt = SP->getStmt();
+  if (AllocationStmt)
+    LocUsedForUniqueing = PathDiagnosticLocation::createBegin(AllocationStmt,
+                                              C.getSourceManager(),
+                                              AllocNode->getLocationContext());
+
+  SmallString<200> buf;
+  llvm::raw_svector_ostream os(buf);
+  if (Region && Region->canPrintPretty()) {
+    os << "Potential leak of memory pointed to by ";
+    Region->printPretty(os);
+  } else {
+    os << "Potential memory leak";
+  }
+
+  BugReport *R = new BugReport(*BT_Leak, os.str(), N, 
+                               LocUsedForUniqueing, 
+                               AllocNode->getLocationContext()->getDecl());
+  R->markInteresting(Sym);
+  R->addVisitor(new MallocBugVisitor(Sym, true));
+  C.emitReport(R);
+}
+
+void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+                                     CheckerContext &C) const {
+  if (!SymReaper.hasDeadSymbols())
+    return;
+
+  ProgramStateRef state = C.getState();
+  RegionStateTy RS = state->get<RegionState>();
+  RegionStateTy::Factory &F = state->get_context<RegionState>();
+
+  SmallVector<SymbolRef, 2> Errors;
+  for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
+    if (SymReaper.isDead(I->first)) {
+      if (I->second.isAllocated())
+        Errors.push_back(I->first);
+      // Remove the dead symbol from the map.
+      RS = F.remove(RS, I->first);
+
+    }
+  }
+  
+  // Cleanup the Realloc Pairs Map.
+  ReallocPairsTy RP = state->get<ReallocPairs>();
+  for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
+    if (SymReaper.isDead(I->first) ||
+        SymReaper.isDead(I->second.ReallocatedSym)) {
+      state = state->remove<ReallocPairs>(I->first);
+    }
+  }
+
+  // Cleanup the FreeReturnValue Map.
+  FreeReturnValueTy FR = state->get<FreeReturnValue>();
+  for (FreeReturnValueTy::iterator I = FR.begin(), E = FR.end(); I != E; ++I) {
+    if (SymReaper.isDead(I->first) ||
+        SymReaper.isDead(I->second)) {
+      state = state->remove<FreeReturnValue>(I->first);
+    }
+  }
+
+  // Generate leak node.
+  ExplodedNode *N = C.getPredecessor();
+  if (!Errors.empty()) {
+    static SimpleProgramPointTag Tag("MallocChecker : DeadSymbolsLeak");
+    N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
+    for (SmallVector<SymbolRef, 2>::iterator
+        I = Errors.begin(), E = Errors.end(); I != E; ++I) {
+      reportLeak(*I, N, C);
+    }
+  }
+
+  C.addTransition(state->set<RegionState>(RS), N);
+}
+
+void MallocChecker::checkPreCall(const CallEvent &Call,
+                                 CheckerContext &C) const {
+
+  // We will check for double free in the post visit.
+  if (const AnyFunctionCall *FC = dyn_cast<AnyFunctionCall>(&Call)) {
+    const FunctionDecl *FD = FC->getDecl();
+    if (!FD)
+      return;
+
+    if ((Filter.CMallocOptimistic || Filter.CMallocPessimistic) &&
+        isFreeFunction(FD, C.getASTContext()))
+      return;
+
+    if (Filter.CNewDeleteChecker &&
+        isStandardNewDelete(FD, C.getASTContext()))
+      return;
+  }
+
+  // Check whether the object on which a method is called has been freed.
+  if (const CXXInstanceCall *CC = dyn_cast<CXXInstanceCall>(&Call)) {
+    SymbolRef Sym = CC->getCXXThisVal().getAsSymbol();
+    if (!Sym || checkUseAfterFree(Sym, C, CC->getCXXThisExpr()))
+      return;
+  }
+
+  // Check arguments for being used after free.
+  for (unsigned I = 0, E = Call.getNumArgs(); I != E; ++I) {
+    SVal ArgSVal = Call.getArgSVal(I);
+    if (ArgSVal.getAs<Loc>()) {
+      SymbolRef Sym = ArgSVal.getAsSymbol();
+      if (!Sym)
+        continue;
+      if (checkUseAfterFree(Sym, C, Call.getArgExpr(I)))
+        return;
+    }
+  }
+}
+
+void MallocChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const {
+  const Expr *E = S->getRetValue();
+  if (!E)
+    return;
+
+  // Check if we are returning a symbol.
+  ProgramStateRef State = C.getState();
+  SVal RetVal = State->getSVal(E, C.getLocationContext());
+  SymbolRef Sym = RetVal.getAsSymbol();
+  if (!Sym)
+    // If we are returning a field of the allocated struct or an array element,
+    // the callee could still free the memory.
+    // TODO: This logic should be a part of generic symbol escape callback.
+    if (const MemRegion *MR = RetVal.getAsRegion())
+      if (isa<FieldRegion>(MR) || isa<ElementRegion>(MR))
+        if (const SymbolicRegion *BMR =
+              dyn_cast<SymbolicRegion>(MR->getBaseRegion()))
+          Sym = BMR->getSymbol();
+
+  // Check if we are returning freed memory.
+  if (Sym)
+    checkUseAfterFree(Sym, C, E);
+}
+
+// TODO: Blocks should be either inlined or should call invalidate regions
+// upon invocation. After that's in place, special casing here will not be 
+// needed.
+void MallocChecker::checkPostStmt(const BlockExpr *BE,
+                                  CheckerContext &C) const {
+
+  // Scan the BlockDeclRefExprs for any object this checker may be tracking.
+  if (!BE->getBlockDecl()->hasCaptures())
+    return;
+
+  ProgramStateRef state = C.getState();
+  const BlockDataRegion *R =
+    cast<BlockDataRegion>(state->getSVal(BE,
+                                         C.getLocationContext()).getAsRegion());
+
+  BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
+                                            E = R->referenced_vars_end();
+
+  if (I == E)
+    return;
+
+  SmallVector<const MemRegion*, 10> Regions;
+  const LocationContext *LC = C.getLocationContext();
+  MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
+
+  for ( ; I != E; ++I) {
+    const VarRegion *VR = I.getCapturedRegion();
+    if (VR->getSuperRegion() == R) {
+      VR = MemMgr.getVarRegion(VR->getDecl(), LC);
+    }
+    Regions.push_back(VR);
+  }
+
+  state =
+    state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
+                                    Regions.data() + Regions.size()).getState();
+  C.addTransition(state);
+}
+
+bool MallocChecker::isReleased(SymbolRef Sym, CheckerContext &C) const {
+  assert(Sym);
+  const RefState *RS = C.getState()->get<RegionState>(Sym);
+  return (RS && RS->isReleased());
+}
+
+bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
+                                      const Stmt *S) const {
+
+  if (isReleased(Sym, C)) {
+    ReportUseAfterFree(C, S->getSourceRange(), Sym);
+    return true;
+  }
+
+  return false;
+}
+
+// Check if the location is a freed symbolic region.
+void MallocChecker::checkLocation(SVal l, bool isLoad, const Stmt *S,
+                                  CheckerContext &C) const {
+  SymbolRef Sym = l.getLocSymbolInBase();
+  if (Sym)
+    checkUseAfterFree(Sym, C, S);
+}
+
+// If a symbolic region is assumed to be NULL (or another constant), stop
+// tracking it, assuming that the allocation failed on this path.
+ProgramStateRef MallocChecker::evalAssume(ProgramStateRef state,
+                                              SVal Cond,
+                                              bool Assumption) const {
+  RegionStateTy RS = state->get<RegionState>();
+  for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
+    // If the symbol is assumed to be NULL, remove it from consideration.
+    ConstraintManager &CMgr = state->getConstraintManager();
+    ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+    if (AllocFailed.isConstrainedTrue())
+      state = state->remove<RegionState>(I.getKey());
+  }
+
+  // Realloc returns 0 when reallocation fails, which means that we should
+  // restore the state of the pointer being reallocated.
+  ReallocPairsTy RP = state->get<ReallocPairs>();
+  for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
+    // If the symbol is assumed to be NULL, remove it from consideration.
+    ConstraintManager &CMgr = state->getConstraintManager();
+    ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+    if (!AllocFailed.isConstrainedTrue())
+      continue;
+
+    SymbolRef ReallocSym = I.getData().ReallocatedSym;
+    if (const RefState *RS = state->get<RegionState>(ReallocSym)) {
+      if (RS->isReleased()) {
+        if (I.getData().Kind == RPToBeFreedAfterFailure)
+          state = state->set<RegionState>(ReallocSym,
+              RefState::getAllocated(RS->getAllocationFamily(), RS->getStmt()));
+        else if (I.getData().Kind == RPDoNotTrackAfterFailure)
+          state = state->remove<RegionState>(ReallocSym);
+        else
+          assert(I.getData().Kind == RPIsFreeOnFailure);
+      }
+    }
+    state = state->remove<ReallocPairs>(I.getKey());
+  }
+
+  return state;
+}
+
+bool MallocChecker::doesNotFreeMemOrInteresting(const CallEvent *Call,
+                                                ProgramStateRef State) const {
+  assert(Call);
+
+  // For now, assume that any C++ call can free memory.
+  // TODO: If we want to be more optimistic here, we'll need to make sure that
+  // regions escape to C++ containers. They seem to do that even now, but for
+  // mysterious reasons.
+  if (!(isa<FunctionCall>(Call) || isa<ObjCMethodCall>(Call)))
+    return false;
+
+  // Check Objective-C messages by selector name.
+  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
+    // If it's not a framework call, or if it takes a callback, assume it
+    // can free memory.
+    if (!Call->isInSystemHeader() || Call->hasNonZeroCallbackArg())
+      return false;
+
+    // If it's a method we know about, handle it explicitly post-call.
+    // This should happen before the "freeWhenDone" check below.
+    if (isKnownDeallocObjCMethodName(*Msg))
+      return true;
+
+    // If there's a "freeWhenDone" parameter, but the method isn't one we know
+    // about, we can't be sure that the object will use free() to deallocate the
+    // memory, so we can't model it explicitly. The best we can do is use it to
+    // decide whether the pointer escapes.
+    if (Optional<bool> FreeWhenDone = getFreeWhenDoneArg(*Msg))
+      return !*FreeWhenDone;
+
+    // If the first selector piece ends with "NoCopy", and there is no
+    // "freeWhenDone" parameter set to zero, we know ownership is being
+    // transferred. Again, though, we can't be sure that the object will use
+    // free() to deallocate the memory, so we can't model it explicitly.
+    StringRef FirstSlot = Msg->getSelector().getNameForSlot(0);
+    if (FirstSlot.endswith("NoCopy"))
+      return false;
+
+    // If the first selector starts with addPointer, insertPointer,
+    // or replacePointer, assume we are dealing with NSPointerArray or similar.
+    // This is similar to C++ containers (vector); we still might want to check
+    // that the pointers get freed by following the container itself.
+    if (FirstSlot.startswith("addPointer") ||
+        FirstSlot.startswith("insertPointer") ||
+        FirstSlot.startswith("replacePointer")) {
+      return false;
+    }
+
+    // Otherwise, assume that the method does not free memory.
+    // Most framework methods do not free memory.
+    return true;
+  }
+
+  // At this point the only thing left to handle is straight function calls.
+  const FunctionDecl *FD = cast<FunctionCall>(Call)->getDecl();
+  if (!FD)
+    return false;
+
+  ASTContext &ASTC = State->getStateManager().getContext();
+
+  // If it's one of the allocation functions we can reason about, we model
+  // its behavior explicitly.
+  if (isMemFunction(FD, ASTC))
+    return true;
+
+  // If it's not a system call, assume it frees memory.
+  if (!Call->isInSystemHeader())
+    return false;
+
+  // White list the system functions whose arguments escape.
+  const IdentifierInfo *II = FD->getIdentifier();
+  if (!II)
+    return false;
+  StringRef FName = II->getName();
+
+  // White list the 'XXXNoCopy' CoreFoundation functions.
+  // We specifically check these before the generic checks further below.
+  if (FName.endswith("NoCopy")) {
+    // Look for the deallocator argument. We know that the memory ownership
+    // is not transferred only if the deallocator argument is
+    // 'kCFAllocatorNull'.
+    for (unsigned i = 1; i < Call->getNumArgs(); ++i) {
+      const Expr *ArgE = Call->getArgExpr(i)->IgnoreParenCasts();
+      if (const DeclRefExpr *DE = dyn_cast<DeclRefExpr>(ArgE)) {
+        StringRef DeallocatorName = DE->getFoundDecl()->getName();
+        if (DeallocatorName == "kCFAllocatorNull")
+          return true;
+      }
+    }
+    return false;
+  }
+
+  // Associating streams with malloced buffers. The pointer can escape if
+  // 'closefn' is specified (and if that function does free memory),
+  // but it will not if closefn is not specified.
+  // Currently, we do not inspect the 'closefn' function (PR12101).
+  if (FName == "funopen")
+    if (Call->getNumArgs() >= 5 && Call->getArgSVal(4).isConstant(0))
+      return true;
+
+  // Do not warn on pointers passed to 'setbuf' when used with std streams;
+  // these leaks might be intentional when setting the buffer for stdio.
+  // http://stackoverflow.com/questions/2671151/who-frees-setvbuf-buffer
+  if (FName == "setbuf" || FName =="setbuffer" ||
+      FName == "setlinebuf" || FName == "setvbuf") {
+    if (Call->getNumArgs() >= 1) {
+      const Expr *ArgE = Call->getArgExpr(0)->IgnoreParenCasts();
+      if (const DeclRefExpr *ArgDRE = dyn_cast<DeclRefExpr>(ArgE))
+        if (const VarDecl *D = dyn_cast<VarDecl>(ArgDRE->getDecl()))
+          if (D->getCanonicalDecl()->getName().find("std") != StringRef::npos)
+            return false;
+    }
+  }
+
+  // A bunch of other functions which either take ownership of a pointer or
+  // wrap the result up in a struct or object, meaning it can be freed later.
+  // (See RetainCountChecker.) Not all the parameters here are invalidated,
+  // but the Malloc checker cannot differentiate between them. The right way
+  // of doing this would be to implement a pointer escapes callback.
+  if (FName == "CGBitmapContextCreate" ||
+      FName == "CGBitmapContextCreateWithData" ||
+      FName == "CVPixelBufferCreateWithBytes" ||
+      FName == "CVPixelBufferCreateWithPlanarBytes" ||
+      FName == "OSAtomicEnqueue") {
+    return false;
+  }
+
+  // Handle cases where we know a buffer's /address/ can escape.
+  // Note that the above checks handle some special cases where we know that
+  // even though the address escapes, it's still our responsibility to free the
+  // buffer.
+  if (Call->argumentsMayEscape())
+    return false;
+
+  // Otherwise, assume that the function does not free memory.
+  // Most system calls do not free the memory.
+  return true;
+}
+
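+// Predicates used by checkPointerEscapeAux to decide which tracked symbols
+// stop being tracked: every allocated symbol for a regular pointer escape,
+// and only memory allocated with new/new[] for an escape through a const
+// pointer.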
+static bool retTrue(const RefState *RS) {
+  return true;
+}
+
+static bool checkIfNewOrNewArrayFamily(const RefState *RS) {
+  return (RS->getAllocationFamily() == AF_CXXNewArray ||
+          RS->getAllocationFamily() == AF_CXXNew);
+}
+
+ProgramStateRef MallocChecker::checkPointerEscape(ProgramStateRef State,
+                                             const InvalidatedSymbols &Escaped,
+                                             const CallEvent *Call,
+                                             PointerEscapeKind Kind) const {
+  return checkPointerEscapeAux(State, Escaped, Call, Kind, &retTrue);
+}
+
+ProgramStateRef MallocChecker::checkConstPointerEscape(ProgramStateRef State,
+                                              const InvalidatedSymbols &Escaped,
+                                              const CallEvent *Call,
+                                              PointerEscapeKind Kind) const {
+  return checkPointerEscapeAux(State, Escaped, Call, Kind,
+                               &checkIfNewOrNewArrayFamily);
+}
+
+ProgramStateRef MallocChecker::checkPointerEscapeAux(ProgramStateRef State,
+                                              const InvalidatedSymbols &Escaped,
+                                              const CallEvent *Call,
+                                              PointerEscapeKind Kind,
+                                  bool(*CheckRefState)(const RefState*)) const {
+  // If we know that the call does not free memory, or we want to process the
+  // call later, keep tracking the top level arguments.
+  if ((Kind == PSK_DirectEscapeOnCall ||
+       Kind == PSK_IndirectEscapeOnCall) &&
+      doesNotFreeMemOrInteresting(Call, State)) {
+    return State;
+  }
+
+  for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
+       E = Escaped.end();
+       I != E; ++I) {
+    SymbolRef sym = *I;
+
+    if (const RefState *RS = State->get<RegionState>(sym)) {
+      if (RS->isAllocated() && CheckRefState(RS)) {
+        State = State->remove<RegionState>(sym);
+        State = State->set<RegionState>(sym, RefState::getEscaped(RS));
+      }
+    }
+  }
+  return State;
+}
+
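+// Find a symbol whose ReallocPair entry is present in prevState but gone in
+// currState; in MallocBugVisitor this identifies the realloc() call that
+// failed along the path. Returns NULL if no such symbol exists.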
+static SymbolRef findFailedReallocSymbol(ProgramStateRef currState,
+                                         ProgramStateRef prevState) {
+  ReallocPairsTy currMap = currState->get<ReallocPairs>();
+  ReallocPairsTy prevMap = prevState->get<ReallocPairs>();
+
+  for (ReallocPairsTy::iterator I = prevMap.begin(), E = prevMap.end();
+       I != E; ++I) {
+    SymbolRef sym = I.getKey();
+    if (!currMap.lookup(sym))
+      return sym;
+  }
+
+  return NULL;
+}
+
+PathDiagnosticPiece *
+MallocChecker::MallocBugVisitor::VisitNode(const ExplodedNode *N,
+                                           const ExplodedNode *PrevN,
+                                           BugReporterContext &BRC,
+                                           BugReport &BR) {
+  ProgramStateRef state = N->getState();
+  ProgramStateRef statePrev = PrevN->getState();
+
+  const RefState *RS = state->get<RegionState>(Sym);
+  const RefState *RSPrev = statePrev->get<RegionState>(Sym);
+  if (!RS)
+    return 0;
+
+  const Stmt *S = 0;
+  const char *Msg = 0;
+  StackHintGeneratorForSymbol *StackHint = 0;
+
+  // Retrieve the associated statement.
+  ProgramPoint ProgLoc = N->getLocation();
+  if (Optional<StmtPoint> SP = ProgLoc.getAs<StmtPoint>()) {
+    S = SP->getStmt();
+  } else if (Optional<CallExitEnd> Exit = ProgLoc.getAs<CallExitEnd>()) {
+    S = Exit->getCalleeContext()->getCallSite();
+  } else if (Optional<BlockEdge> Edge = ProgLoc.getAs<BlockEdge>()) {
+    // If an assumption was made on a branch, it should be caught
+    // here by looking at the state transition.
+    S = Edge->getSrc()->getTerminator();
+  }
+
+  if (!S)
+    return 0;
+
+  // FIXME: We will eventually need to handle non-statement-based events
+  // (__attribute__((cleanup))).
+
+  // Find out if this is an interesting point and, if so, what kind it is.
+  if (Mode == Normal) {
+    if (isAllocated(RS, RSPrev, S)) {
+      Msg = "Memory is allocated";
+      StackHint = new StackHintGeneratorForSymbol(Sym,
+                                                  "Returned allocated memory");
+    } else if (isReleased(RS, RSPrev, S)) {
+      Msg = "Memory is released";
+      StackHint = new StackHintGeneratorForSymbol(Sym,
+                                             "Returning; memory was released");
+    } else if (isRelinquished(RS, RSPrev, S)) {
+      Msg = "Memory ownership is transfered";
+      StackHint = new StackHintGeneratorForSymbol(Sym, "");
+    } else if (isReallocFailedCheck(RS, RSPrev, S)) {
+      Mode = ReallocationFailed;
+      Msg = "Reallocation failed";
+      StackHint = new StackHintGeneratorForReallocationFailed(Sym,
+                                                       "Reallocation failed");
+
+      if (SymbolRef sym = findFailedReallocSymbol(state, statePrev)) {
+        // Is it possible to fail two reallocs WITHOUT testing in between?
+        assert((!FailedReallocSymbol || FailedReallocSymbol == sym) &&
+          "We only support one failed realloc at a time.");
+        BR.markInteresting(sym);
+        FailedReallocSymbol = sym;
+      }
+    }
+
+  // We are in a special mode if a reallocation failed later in the path.
+  } else if (Mode == ReallocationFailed) {
+    assert(FailedReallocSymbol && "No symbol to look for.");
+
+    // Is this the first appearance of the reallocated symbol?
+    if (!statePrev->get<RegionState>(FailedReallocSymbol)) {
+      // We're at the reallocation point.
+      Msg = "Attempt to reallocate memory";
+      StackHint = new StackHintGeneratorForSymbol(Sym,
+                                                 "Returned reallocated memory");
+      FailedReallocSymbol = NULL;
+      Mode = Normal;
+    }
+  }
+
+  if (!Msg)
+    return 0;
+  assert(StackHint);
+
+  // Generate the extra diagnostic.
+  PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+                             N->getLocationContext());
+  return new PathDiagnosticEventPiece(Pos, Msg, true, StackHint);
+}
+
+void MallocChecker::printState(raw_ostream &Out, ProgramStateRef State,
+                               const char *NL, const char *Sep) const {
+
+  RegionStateTy RS = State->get<RegionState>();
+
+  if (!RS.isEmpty()) {
+    Out << Sep << "MallocChecker:" << NL;
+    for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
+      I.getKey()->dumpToStream(Out);
+      Out << " : ";
+      I.getData().dump(Out);
+      Out << NL;
+    }
+  }
+}
+
+void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) {
+  registerCStringCheckerBasic(mgr);
+  mgr.registerChecker<MallocChecker>()->Filter.CNewDeleteLeaksChecker = true;
+  // We currently treat the NewDeleteLeaks checker as a subchecker of the
+  // NewDelete checker.
+  mgr.registerChecker<MallocChecker>()->Filter.CNewDeleteChecker = true;
+}
+
+#define REGISTER_CHECKER(name) \
+void ento::register##name(CheckerManager &mgr) {\
+  registerCStringCheckerBasic(mgr); \
+  mgr.registerChecker<MallocChecker>()->Filter.C##name = true;\
+}
+
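+// For reference, REGISTER_CHECKER(MallocPessimistic) expands to:
+//
+//   void ento::registerMallocPessimistic(CheckerManager &mgr) {
+//     registerCStringCheckerBasic(mgr);
+//     mgr.registerChecker<MallocChecker>()->Filter.CMallocPessimistic = true;
+//   }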
+REGISTER_CHECKER(MallocPessimistic)
+REGISTER_CHECKER(MallocOptimistic)
+REGISTER_CHECKER(NewDeleteChecker)
+REGISTER_CHECKER(MismatchedDeallocatorChecker)
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
new file mode 100644
index 0000000..34425e3
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
@@ -0,0 +1,267 @@
+// MallocOverflowSecurityChecker.cpp - Check for malloc overflows -*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker detects a common memory allocation security flaw.
+// Suppose 'unsigned int n' comes from an untrusted source. If the
+// code looks like 'malloc (n * 4)', and an attacker can make 'n' be
+// say MAX_UINT/4+2, then instead of allocating the correct 'n' 4-byte
+// elements, this will actually allocate only two because of overflow.
+// Then when the rest of the program attempts to store values past the
+// second element, these values will actually overwrite other items in
+// the heap, probably allowing the attacker to execute arbitrary code.
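+//
+// An illustrative, hypothetical instance of the pattern this checker flags
+// (read_untrusted_size() stands in for any source of untrusted input):
+//
+//   unsigned int n = read_untrusted_size();  // attacker-controlled
+//   int *buf = (int *) malloc(n * 4);        // n * 4 can wrap around
+//   for (unsigned int i = 0; i < n; ++i)     // writes run past the end of
+//     buf[i] = 0;                            // the (too small) allocation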
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallVector.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+struct MallocOverflowCheck {
+  const BinaryOperator *mulop;
+  const Expr *variable;
+
+  MallocOverflowCheck (const BinaryOperator *m, const Expr *v) 
+    : mulop(m), variable (v)
+  {}
+};
+
+class MallocOverflowSecurityChecker : public Checker<check::ASTCodeBody> {
+public:
+  void checkASTCodeBody(const Decl *D, AnalysisManager &mgr,
+                        BugReporter &BR) const;
+
+  void CheckMallocArgument(
+    SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
+    const Expr *TheArgument, ASTContext &Context) const;
+
+  void OutputPossibleOverflows(
+    SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
+    const Decl *D, BugReporter &BR, AnalysisManager &mgr) const;
+
+};
+} // end anonymous namespace
+
+void MallocOverflowSecurityChecker::CheckMallocArgument(
+  SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
+  const Expr *TheArgument,
+  ASTContext &Context) const {
+
+  /* Look for a linear combination with a single variable, and at least
+   one multiplication.
+   Reject anything that applies to the variable: an explicit cast,
+   conditional expression, an operation that could reduce the range
+   of the result, or anything too complicated :-).  */
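+  // For example (illustrative only): for 'malloc(n * 4 + 8)' this records
+  // the multiplication and the reference to 'n', while 'malloc((size_t)n * 4)'
+  // is rejected because the explicit cast is applied to the variable.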
+  const Expr * e = TheArgument;
+  const BinaryOperator * mulop = NULL;
+
+  for (;;) {
+    e = e->IgnoreParenImpCasts();
+    if (isa<BinaryOperator>(e)) {
+      const BinaryOperator * binop = dyn_cast<BinaryOperator>(e);
+      BinaryOperatorKind opc = binop->getOpcode();
+      // TODO: ignore multiplications by 1, reject if multiplied by 0.
+      if (mulop == NULL && opc == BO_Mul)
+        mulop = binop;
+      if (opc != BO_Mul && opc != BO_Add && opc != BO_Sub && opc != BO_Shl)
+        return;
+
+      const Expr *lhs = binop->getLHS();
+      const Expr *rhs = binop->getRHS();
+      if (rhs->isEvaluatable(Context))
+        e = lhs;
+      else if ((opc == BO_Add || opc == BO_Mul)
+               && lhs->isEvaluatable(Context))
+        e = rhs;
+      else
+        return;
+    }
+    else if (isa<DeclRefExpr>(e) || isa<MemberExpr>(e))
+      break;
+    else
+      return;
+  }
+
+  if (mulop == NULL)
+    return;
+
+  // We've found the right structure of the malloc argument; now save the
+  // data so that, once the body of the function is completely available, we
+  // can check for comparisons.
+
+  // TODO: Could push this into the innermost scope where 'e' is
+  // defined, rather than the whole function.
+  PossibleMallocOverflows.push_back(MallocOverflowCheck(mulop, e));
+}
+
+namespace {
+// A worker class for OutputPossibleOverflows.
+class CheckOverflowOps :
+  public EvaluatedExprVisitor<CheckOverflowOps> {
+public:
+  typedef SmallVectorImpl<MallocOverflowCheck> theVecType;
+
+private:
+    theVecType &toScanFor;
+    ASTContext &Context;
+
+    bool isIntZeroExpr(const Expr *E) const {
+      if (!E->getType()->isIntegralOrEnumerationType())
+        return false;
+      llvm::APSInt Result;
+      if (E->EvaluateAsInt(Result, Context))
+        return Result == 0;
+      return false;
+    }
+
+    void CheckExpr(const Expr *E_p) {
+      const Expr *E = E_p->IgnoreParenImpCasts();
+
+      theVecType::iterator i = toScanFor.end();
+      theVecType::iterator e = toScanFor.begin();
+
+      if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E)) {
+        const Decl * EdreD = DR->getDecl();
+        while (i != e) {
+          --i;
+          if (const DeclRefExpr *DR_i = dyn_cast<DeclRefExpr>(i->variable)) {
+            if (DR_i->getDecl() == EdreD)
+              i = toScanFor.erase(i);
+          }
+        }
+      }
+      else if (isa<MemberExpr>(E)) {
+        // No points-to analysis, just look at the member
+        const Decl * EmeMD = dyn_cast<MemberExpr>(E)->getMemberDecl();
+        while (i != e) {
+          --i;
+          if (isa<MemberExpr>(i->variable)) {
+            if (dyn_cast<MemberExpr>(i->variable)->getMemberDecl() == EmeMD)
+              i = toScanFor.erase (i);
+          }
+        }
+      }
+    }
+
+  public:
+    void VisitBinaryOperator(BinaryOperator *E) {
+      if (E->isComparisonOp()) {
+        const Expr * lhs = E->getLHS();
+        const Expr * rhs = E->getRHS();
+        // Ignore comparisons against zero, since they generally don't
+        // protect against an overflow.
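+        // For instance, a guard such as 'if (n > 0)' is not treated as
+        // protection here, since it does not exclude the large values of 'n'
+        // for which the multiplication overflows.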
+        if (!isIntZeroExpr(lhs) && !isIntZeroExpr(rhs)) {
+          CheckExpr(lhs);
+          CheckExpr(rhs);
+        }
+      }
+      EvaluatedExprVisitor<CheckOverflowOps>::VisitBinaryOperator(E);
+    }
+
+    /* We specifically ignore loop conditions, because they're typically
+     not error checks.  */
+    void VisitWhileStmt(WhileStmt *S) {
+      return this->Visit(S->getBody());
+    }
+    void VisitForStmt(ForStmt *S) {
+      return this->Visit(S->getBody());
+    }
+    void VisitDoStmt(DoStmt *S) {
+      return this->Visit(S->getBody());
+    }
+
+    CheckOverflowOps(theVecType &v, ASTContext &ctx)
+    : EvaluatedExprVisitor<CheckOverflowOps>(ctx),
+      toScanFor(v), Context(ctx)
+    { }
+  };
+}
+
+// OutputPossibleOverflows - We've found a possible overflow earlier,
+// now check whether Body might contain a comparison which might be
+// preventing the overflow.
+// This doesn't do flow analysis, range analysis, or points-to analysis; it's
+// just a dumb "is there a comparison" scan.  The aim here is to
+// detect the most blatant cases of overflow and educate the
+// programmer.
+void MallocOverflowSecurityChecker::OutputPossibleOverflows(
+  SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
+  const Decl *D, BugReporter &BR, AnalysisManager &mgr) const {
+  // By far the most common case: nothing to check.
+  if (PossibleMallocOverflows.empty())
+    return;
+
+  // Delete any possible overflows which have a comparison.
+  CheckOverflowOps c(PossibleMallocOverflows, BR.getContext());
+  c.Visit(mgr.getAnalysisDeclContext(D)->getBody());
+
+  // Output warnings for all overflows that are left.
+  for (CheckOverflowOps::theVecType::iterator
+       i = PossibleMallocOverflows.begin(),
+       e = PossibleMallocOverflows.end();
+       i != e;
+       ++i) {
+    SourceRange R = i->mulop->getSourceRange();
+    BR.EmitBasicReport(D, "malloc() size overflow", categories::UnixAPI,
+      "the computation of the size of the memory allocation may overflow",
+      PathDiagnosticLocation::createOperatorLoc(i->mulop,
+                                                BR.getSourceManager()), &R, 1);
+  }
+}
+
+void MallocOverflowSecurityChecker::checkASTCodeBody(const Decl *D,
+                                             AnalysisManager &mgr,
+                                             BugReporter &BR) const {
+
+  CFG *cfg = mgr.getCFG(D);
+  if (!cfg)
+    return;
+
+  // A list of variables referenced in possibly overflowing malloc operands.
+  SmallVector<MallocOverflowCheck, 2> PossibleMallocOverflows;
+
+  for (CFG::iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it) {
+    CFGBlock *block = *it;
+    for (CFGBlock::iterator bi = block->begin(), be = block->end();
+         bi != be; ++bi) {
+      if (Optional<CFGStmt> CS = bi->getAs<CFGStmt>()) {
+        if (const CallExpr *TheCall = dyn_cast<CallExpr>(CS->getStmt())) {
+          // Get the callee.
+          const FunctionDecl *FD = TheCall->getDirectCallee();
+
+          if (!FD)
+            return;
+
+          // Get the name of the callee.
+          IdentifierInfo *FnInfo = FD->getIdentifier();
+          if (!FnInfo)
+            return;
+
+          if (FnInfo->isStr ("malloc") || FnInfo->isStr ("_MALLOC")) {
+            if (TheCall->getNumArgs() == 1)
+              CheckMallocArgument(PossibleMallocOverflows, TheCall->getArg(0),
+                                  mgr.getASTContext());
+          }
+        }
+      }
+    }
+  }
+
+  OutputPossibleOverflows(PossibleMallocOverflows, D, BR, mgr);
+}
+
+void ento::registerMallocOverflowSecurityChecker(CheckerManager &mgr) {
+  mgr.registerChecker<MallocOverflowSecurityChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
new file mode 100644
index 0000000..d29f34f
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -0,0 +1,252 @@
+// MallocSizeofChecker.cpp - Check for dubious malloc arguments ---*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Reports inconsistencies between the casted type of the return value of a
+// malloc/calloc/realloc call and the operand of any sizeof expressions
+// contained within its argument(s).
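+//
+// For example (hypothetical snippet), the following is reported because the
+// sizeof operand 'short' is incompatible with the pointee type 'int':
+//
+//   int *p = (int *) malloc(n * sizeof(short));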
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+typedef std::pair<const TypeSourceInfo *, const CallExpr *> TypeCallPair;
+typedef llvm::PointerUnion<const Stmt *, const VarDecl *> ExprParent;
+
+class CastedAllocFinder
+  : public ConstStmtVisitor<CastedAllocFinder, TypeCallPair> {
+  IdentifierInfo *II_malloc, *II_calloc, *II_realloc;
+
+public:
+  struct CallRecord {
+    ExprParent CastedExprParent;
+    const Expr *CastedExpr;
+    const TypeSourceInfo *ExplicitCastType;
+    const CallExpr *AllocCall;
+
+    CallRecord(ExprParent CastedExprParent, const Expr *CastedExpr,
+               const TypeSourceInfo *ExplicitCastType,
+               const CallExpr *AllocCall)
+      : CastedExprParent(CastedExprParent), CastedExpr(CastedExpr),
+        ExplicitCastType(ExplicitCastType), AllocCall(AllocCall) {}
+  };
+
+  typedef std::vector<CallRecord> CallVec;
+  CallVec Calls;
+
+  CastedAllocFinder(ASTContext *Ctx) :
+    II_malloc(&Ctx->Idents.get("malloc")),
+    II_calloc(&Ctx->Idents.get("calloc")),
+    II_realloc(&Ctx->Idents.get("realloc")) {}
+
+  void VisitChild(ExprParent Parent, const Stmt *S) {
+    TypeCallPair AllocCall = Visit(S);
+    if (AllocCall.second && AllocCall.second != S)
+      Calls.push_back(CallRecord(Parent, cast<Expr>(S), AllocCall.first,
+                                 AllocCall.second));
+  }
+
+  void VisitChildren(const Stmt *S) {
+    for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+         I!=E; ++I)
+      if (const Stmt *child = *I)
+        VisitChild(S, child);
+  }
+
+  TypeCallPair VisitCastExpr(const CastExpr *E) {
+    return Visit(E->getSubExpr());
+  }
+
+  TypeCallPair VisitExplicitCastExpr(const ExplicitCastExpr *E) {
+    return TypeCallPair(E->getTypeInfoAsWritten(),
+                        Visit(E->getSubExpr()).second);
+  }
+
+  TypeCallPair VisitParenExpr(const ParenExpr *E) {
+    return Visit(E->getSubExpr());
+  }
+
+  TypeCallPair VisitStmt(const Stmt *S) {
+    VisitChildren(S);
+    return TypeCallPair();
+  }
+
+  TypeCallPair VisitCallExpr(const CallExpr *E) {
+    VisitChildren(E);
+    const FunctionDecl *FD = E->getDirectCallee();
+    if (FD) {
+      IdentifierInfo *II = FD->getIdentifier();
+      if (II == II_malloc || II == II_calloc || II == II_realloc)
+        return TypeCallPair((const TypeSourceInfo *)0, E);
+    }
+    return TypeCallPair();
+  }
+
+  TypeCallPair VisitDeclStmt(const DeclStmt *S) {
+    for (DeclStmt::const_decl_iterator I = S->decl_begin(), E = S->decl_end();
+         I!=E; ++I)
+      if (const VarDecl *VD = dyn_cast<VarDecl>(*I))
+        if (const Expr *Init = VD->getInit())
+          VisitChild(VD, Init);
+    return TypeCallPair();
+  }
+};
+
+class SizeofFinder : public ConstStmtVisitor<SizeofFinder> {
+public:
+  std::vector<const UnaryExprOrTypeTraitExpr *> Sizeofs;
+
+  void VisitBinMul(const BinaryOperator *E) {
+    Visit(E->getLHS());
+    Visit(E->getRHS());
+  }
+
+  void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
+    return Visit(E->getSubExpr());
+  }
+
+  void VisitParenExpr(const ParenExpr *E) {
+    return Visit(E->getSubExpr());
+  }
+
+  void VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E) {
+    if (E->getKind() != UETT_SizeOf)
+      return;
+
+    Sizeofs.push_back(E);
+  }
+};
+
+// Determine if the pointee and sizeof types are compatible.  Here
+// we ignore constness of pointer types.
+static bool typesCompatible(ASTContext &C, QualType A, QualType B) {
+  while (true) {
+    A = A.getCanonicalType();
+    B = B.getCanonicalType();
+  
+    if (A.getTypePtr() == B.getTypePtr())
+      return true;
+    
+    if (const PointerType *ptrA = A->getAs<PointerType>())
+      if (const PointerType *ptrB = B->getAs<PointerType>()) {
+        A = ptrA->getPointeeType();
+        B = ptrB->getPointeeType();
+        continue;
+      }
+      
+    break;
+  }
+  
+  return false;
+}
+
+static bool compatibleWithArrayType(ASTContext &C, QualType PT, QualType T) {
+  // Ex: 'int a[10][2]' is compatible with 'int', 'int[2]', 'int[10][2]'.
+  while (const ArrayType *AT = T->getAsArrayTypeUnsafe()) {
+    QualType ElemType = AT->getElementType();
+    if (typesCompatible(C, PT, AT->getElementType()))
+      return true;
+    T = ElemType;
+  }
+
+  return false;
+}
+
+class MallocSizeofChecker : public Checker<check::ASTCodeBody> {
+public:
+  void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+                        BugReporter &BR) const {
+    AnalysisDeclContext *ADC = mgr.getAnalysisDeclContext(D);
+    CastedAllocFinder Finder(&BR.getContext());
+    Finder.Visit(D->getBody());
+    for (CastedAllocFinder::CallVec::iterator i = Finder.Calls.begin(),
+         e = Finder.Calls.end(); i != e; ++i) {
+      QualType CastedType = i->CastedExpr->getType();
+      if (!CastedType->isPointerType())
+        continue;
+      QualType PointeeType = CastedType->getAs<PointerType>()->getPointeeType();
+      if (PointeeType->isVoidType())
+        continue;
+
+      for (CallExpr::const_arg_iterator ai = i->AllocCall->arg_begin(),
+           ae = i->AllocCall->arg_end(); ai != ae; ++ai) {
+        if (!(*ai)->getType()->isIntegralOrUnscopedEnumerationType())
+          continue;
+
+        SizeofFinder SFinder;
+        SFinder.Visit(*ai);
+        if (SFinder.Sizeofs.size() != 1)
+          continue;
+
+        QualType SizeofType = SFinder.Sizeofs[0]->getTypeOfArgument();
+
+        if (typesCompatible(BR.getContext(), PointeeType, SizeofType))
+          continue;
+
+        // If the argument to sizeof is an array, the result could be a
+        // pointer to any array element.
+        if (compatibleWithArrayType(BR.getContext(), PointeeType, SizeofType))
+          continue;
+
+        const TypeSourceInfo *TSI = 0;
+        if (i->CastedExprParent.is<const VarDecl *>()) {
+          TSI =
+              i->CastedExprParent.get<const VarDecl *>()->getTypeSourceInfo();
+        } else {
+          TSI = i->ExplicitCastType;
+        }
+
+        SmallString<64> buf;
+        llvm::raw_svector_ostream OS(buf);
+
+        OS << "Result of ";
+        const FunctionDecl *Callee = i->AllocCall->getDirectCallee();
+        if (Callee && Callee->getIdentifier())
+          OS << '\'' << Callee->getIdentifier()->getName() << '\'';
+        else
+          OS << "call";
+        OS << " is converted to a pointer of type '"
+            << PointeeType.getAsString() << "', which is incompatible with "
+            << "sizeof operand type '" << SizeofType.getAsString() << "'";
+        SmallVector<SourceRange, 4> Ranges;
+        Ranges.push_back(i->AllocCall->getCallee()->getSourceRange());
+        Ranges.push_back(SFinder.Sizeofs[0]->getSourceRange());
+        if (TSI)
+          Ranges.push_back(TSI->getTypeLoc().getSourceRange());
+
+        PathDiagnosticLocation L =
+            PathDiagnosticLocation::createBegin(i->AllocCall->getCallee(),
+                BR.getSourceManager(), ADC);
+
+        BR.EmitBasicReport(D, "Allocator sizeof operand mismatch",
+            categories::UnixAPI,
+            OS.str(),
+            L, Ranges.data(), Ranges.size());
+      }
+    }
+  }
+};
+
+}
+
+void ento::registerMallocSizeofChecker(CheckerManager &mgr) {
+  mgr.registerChecker<MallocSizeofChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
new file mode 100644
index 0000000..fc28e1f
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -0,0 +1,80 @@
+//=- NSAutoreleasePoolChecker.cpp --------------------------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines an NSAutoreleasePoolChecker, a small checker that warns
+//  about subpar uses of NSAutoreleasePool.  Note that while the check itself
+//  (in its current form) could be written as a flow-insensitive check, it
+//  can potentially be enhanced in the future with flow-sensitive information.
+//  It is also a good example of the CheckerVisitor interface. 
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class NSAutoreleasePoolChecker
+  : public Checker<check::PreObjCMessage> {
+  mutable OwningPtr<BugType> BT;
+  mutable Selector releaseS;
+
+public:
+  void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+};
+
+} // end anonymous namespace
+
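+// Warns when 'release' is sent directly to an NSAutoreleasePool instance.
+// The checker is only registered when garbage collection is enabled (see
+// registerNSAutoreleasePoolChecker below), where -drain should be used
+// instead of -release.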
+void NSAutoreleasePoolChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
+                                                   CheckerContext &C) const {
+  if (!msg.isInstanceMessage())
+    return;
+
+  const ObjCInterfaceDecl *OD = msg.getReceiverInterface();
+  if (!OD)
+    return;  
+  if (!OD->getIdentifier()->isStr("NSAutoreleasePool"))
+    return;
+
+  if (releaseS.isNull())
+    releaseS = GetNullarySelector("release", C.getASTContext());
+  // Sending 'release' message?
+  if (msg.getSelector() != releaseS)
+    return;
+
+  if (!BT)
+    BT.reset(new BugType("Use -drain instead of -release",
+                         "API Upgrade (Apple)"));
+
+  ExplodedNode *N = C.addTransition();
+  if (!N) {
+    assert(0);
+    return;
+  }
+
+  BugReport *Report = new BugReport(*BT, "Use -drain instead of -release when "
+    "using NSAutoreleasePool and garbage collection", N);
+  Report->addRange(msg.getSourceRange());
+  C.emitReport(Report);
+}
+
+void ento::registerNSAutoreleasePoolChecker(CheckerManager &mgr) {
+  if (mgr.getLangOpts().getGC() != LangOptions::NonGC)
+    mgr.registerChecker<NSAutoreleasePoolChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
new file mode 100644
index 0000000..9f01522
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -0,0 +1,318 @@
+//=- NSErrorChecker.cpp - Coding conventions for uses of NSError -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines CheckNSError, a flow-insensitive check that determines
+//  whether Objective-C methods and C functions that take an NSError** or
+//  CFErrorRef* out parameter have a non-void return type to report errors.
+//
+//  File under feature request PR 2600.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool IsNSError(QualType T, IdentifierInfo *II);
+static bool IsCFError(QualType T, IdentifierInfo *II);
+
+//===----------------------------------------------------------------------===//
+// NSErrorMethodChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class NSErrorMethodChecker
+    : public Checker< check::ASTDecl<ObjCMethodDecl> > {
+  mutable IdentifierInfo *II;
+
+public:
+  NSErrorMethodChecker() : II(0) { }
+
+  void checkASTDecl(const ObjCMethodDecl *D,
+                    AnalysisManager &mgr, BugReporter &BR) const;
+};
+}
+
+void NSErrorMethodChecker::checkASTDecl(const ObjCMethodDecl *D,
+                                        AnalysisManager &mgr,
+                                        BugReporter &BR) const {
+  if (!D->isThisDeclarationADefinition())
+    return;
+  if (!D->getResultType()->isVoidType())
+    return;
+
+  if (!II)
+    II = &D->getASTContext().Idents.get("NSError"); 
+
+  bool hasNSError = false;
+  for (ObjCMethodDecl::param_const_iterator
+         I = D->param_begin(), E = D->param_end(); I != E; ++I)  {
+    if (IsNSError((*I)->getType(), II)) {
+      hasNSError = true;
+      break;
+    }
+  }
+
+  if (hasNSError) {
+    const char *err = "Method accepting NSError** "
+        "should have a non-void return value to indicate whether or not an "
+        "error occurred";
+    PathDiagnosticLocation L =
+      PathDiagnosticLocation::create(D, BR.getSourceManager());
+    BR.EmitBasicReport(D, "Bad return type when passing NSError**",
+                       "Coding conventions (Apple)", err, L);
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// CFErrorFunctionChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFErrorFunctionChecker
+    : public Checker< check::ASTDecl<FunctionDecl> > {
+  mutable IdentifierInfo *II;
+
+public:
+  CFErrorFunctionChecker() : II(0) { }
+
+  void checkASTDecl(const FunctionDecl *D,
+                    AnalysisManager &mgr, BugReporter &BR) const;
+};
+}
+
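+// For illustration (hypothetical example), a definition such as
+//
+//   void updateSettings(CFErrorRef *error) { /* ... */ }
+//
+// is flagged, because the void return type gives callers no way to tell
+// whether an error was produced.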
+void CFErrorFunctionChecker::checkASTDecl(const FunctionDecl *D,
+                                        AnalysisManager &mgr,
+                                        BugReporter &BR) const {
+  if (!D->doesThisDeclarationHaveABody())
+    return;
+  if (!D->getResultType()->isVoidType())
+    return;
+
+  if (!II)
+    II = &D->getASTContext().Idents.get("CFErrorRef"); 
+
+  bool hasCFError = false;
+  for (FunctionDecl::param_const_iterator
+         I = D->param_begin(), E = D->param_end(); I != E; ++I)  {
+    if (IsCFError((*I)->getType(), II)) {
+      hasCFError = true;
+      break;
+    }
+  }
+
+  if (hasCFError) {
+    const char *err = "Function accepting CFErrorRef* "
+        "should have a non-void return value to indicate whether or not an "
+        "error occurred";
+    PathDiagnosticLocation L =
+      PathDiagnosticLocation::create(D, BR.getSourceManager());
+    BR.EmitBasicReport(D, "Bad return type when passing CFErrorRef*",
+                       "Coding conventions (Apple)", err, L);
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// NSOrCFErrorDerefChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class NSErrorDerefBug : public BugType {
+public:
+  NSErrorDerefBug() : BugType("NSError** null dereference",
+                              "Coding conventions (Apple)") {}
+};
+
+class CFErrorDerefBug : public BugType {
+public:
+  CFErrorDerefBug() : BugType("CFErrorRef* null dereference",
+                              "Coding conventions (Apple)") {}
+};
+
+}
+
+namespace {
+class NSOrCFErrorDerefChecker
+    : public Checker< check::Location,
+                        check::Event<ImplicitNullDerefEvent> > {
+  mutable IdentifierInfo *NSErrorII, *CFErrorII;
+public:
+  bool ShouldCheckNSError, ShouldCheckCFError;
+  NSOrCFErrorDerefChecker() : NSErrorII(0), CFErrorII(0),
+                              ShouldCheckNSError(0), ShouldCheckCFError(0) { }
+
+  void checkLocation(SVal loc, bool isLoad, const Stmt *S,
+                     CheckerContext &C) const;
+  void checkEvent(ImplicitNullDerefEvent event) const;
+};
+}
+
+typedef llvm::ImmutableMap<SymbolRef, unsigned> ErrorOutFlag;
+REGISTER_TRAIT_WITH_PROGRAMSTATE(NSErrorOut, ErrorOutFlag)
+REGISTER_TRAIT_WITH_PROGRAMSTATE(CFErrorOut, ErrorOutFlag)
+
+template <typename T>
+static bool hasFlag(SVal val, ProgramStateRef state) {
+  if (SymbolRef sym = val.getAsSymbol())
+    if (const unsigned *attachedFlags = state->get<T>(sym))
+      return *attachedFlags;
+  return false;
+}
+
+template <typename T>
+static void setFlag(ProgramStateRef state, SVal val, CheckerContext &C) {
+  // We tag the symbol that the SVal wraps.
+  if (SymbolRef sym = val.getAsSymbol())
+    C.addTransition(state->set<T>(sym, true));
+}
+
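+// If 'val' is the region of a parameter of the current stack frame, return
+// that parameter's declared type; otherwise return a null QualType.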
+static QualType parameterTypeFromSVal(SVal val, CheckerContext &C) {
+  const StackFrameContext *
+    SFC = C.getLocationContext()->getCurrentStackFrame();
+  if (Optional<loc::MemRegionVal> X = val.getAs<loc::MemRegionVal>()) {
+    const MemRegion* R = X->getRegion();
+    if (const VarRegion *VR = R->getAs<VarRegion>())
+      if (const StackArgumentsSpaceRegion *
+          stackReg = dyn_cast<StackArgumentsSpaceRegion>(VR->getMemorySpace()))
+        if (stackReg->getStackFrame() == SFC)
+          return VR->getValueType();
+  }
+
+  return QualType();
+}
+
+void NSOrCFErrorDerefChecker::checkLocation(SVal loc, bool isLoad,
+                                            const Stmt *S,
+                                            CheckerContext &C) const {
+  if (!isLoad)
+    return;
+  if (loc.isUndef() || !loc.getAs<Loc>())
+    return;
+
+  ASTContext &Ctx = C.getASTContext();
+  ProgramStateRef state = C.getState();
+
+  // If we are loading from an NSError**/CFErrorRef* parameter, mark the
+  // resulting SVal so that we can later check it when handling the
+  // ImplicitNullDerefEvent event.
+  // FIXME: Cumbersome! Maybe add a hook at construction of SVals at the
+  // start of the function?
+
+  QualType parmT = parameterTypeFromSVal(loc, C);
+  if (parmT.isNull())
+    return;
+
+  if (!NSErrorII)
+    NSErrorII = &Ctx.Idents.get("NSError");
+  if (!CFErrorII)
+    CFErrorII = &Ctx.Idents.get("CFErrorRef");
+
+  if (ShouldCheckNSError && IsNSError(parmT, NSErrorII)) {
+    setFlag<NSErrorOut>(state, state->getSVal(loc.castAs<Loc>()), C);
+    return;
+  }
+
+  if (ShouldCheckCFError && IsCFError(parmT, CFErrorII)) {
+    setFlag<CFErrorOut>(state, state->getSVal(loc.castAs<Loc>()), C);
+    return;
+  }
+}
+
+void NSOrCFErrorDerefChecker::checkEvent(ImplicitNullDerefEvent event) const {
+  if (event.IsLoad)
+    return;
+
+  SVal loc = event.Location;
+  ProgramStateRef state = event.SinkNode->getState();
+  BugReporter &BR = *event.BR;
+
+  bool isNSError = hasFlag<NSErrorOut>(loc, state);
+  bool isCFError = false;
+  if (!isNSError)
+    isCFError = hasFlag<CFErrorOut>(loc, state);
+
+  if (!(isNSError || isCFError))
+    return;
+
+  // Storing to possible null NSError/CFErrorRef out parameter.
+  SmallString<128> Buf;
+  llvm::raw_svector_ostream os(Buf);
+
+  os << "Potential null dereference.  According to coding standards ";
+  os << (isNSError
+         ? "in 'Creating and Returning NSError Objects' the parameter"
+         : "documented in CoreFoundation/CFError.h the parameter");
+
+  os  << " may be null";
+
+  BugType *bug = 0;
+  if (isNSError)
+    bug = new NSErrorDerefBug();
+  else
+    bug = new CFErrorDerefBug();
+  BugReport *report = new BugReport(*bug, os.str(), event.SinkNode);
+  BR.emitReport(report);
+}
+
+static bool IsNSError(QualType T, IdentifierInfo *II) {
+
+  const PointerType* PPT = T->getAs<PointerType>();
+  if (!PPT)
+    return false;
+
+  const ObjCObjectPointerType* PT =
+    PPT->getPointeeType()->getAs<ObjCObjectPointerType>();
+
+  if (!PT)
+    return false;
+
+  const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+
+  // FIXME: Can ID ever be NULL?
+  if (ID)
+    return II == ID->getIdentifier();
+
+  return false;
+}
+
+static bool IsCFError(QualType T, IdentifierInfo *II) {
+  const PointerType* PPT = T->getAs<PointerType>();
+  if (!PPT) return false;
+
+  const TypedefType* TT = PPT->getPointeeType()->getAs<TypedefType>();
+  if (!TT) return false;
+
+  return TT->getDecl()->getIdentifier() == II;
+}
+
+void ento::registerNSErrorChecker(CheckerManager &mgr) {
+  mgr.registerChecker<NSErrorMethodChecker>();
+  NSOrCFErrorDerefChecker *
+    checker = mgr.registerChecker<NSOrCFErrorDerefChecker>();
+  checker->ShouldCheckNSError = true;
+}
+
+void ento::registerCFErrorChecker(CheckerManager &mgr) {
+  mgr.registerChecker<CFErrorFunctionChecker>();
+  NSOrCFErrorDerefChecker *
+    checker = mgr.registerChecker<NSOrCFErrorDerefChecker>();
+  checker->ShouldCheckCFError = true;
+}
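
As a concrete illustration of the conventions these checkers enforce, here is a
minimal hypothetical sketch (the function names are invented, and the opaque
CFErrorRef typedef is a stand-in so the sketch is self-contained; real code
would include CoreFoundation/CFError.h):

// Stand-in for the real CoreFoundation typedef.
typedef struct OpaqueCFError *CFErrorRef;

// The shape CFErrorFunctionChecker reports: the function takes a CFErrorRef*
// out parameter but returns void, so callers cannot tell whether it failed.
void parseConfig(const char *path, CFErrorRef *error) {
  if (!path) {
    // The kind of store NSOrCFErrorDerefChecker warns about: callers that do
    // not care about the error conventionally pass a null CFErrorRef*, so
    // 'error' must be checked before writing through it.
    *error = 0;
    return;
  }
  // ... parse ...
}

// The conventional shape: return a success flag and guard the out parameter.
bool parseConfigChecked(const char *path, CFErrorRef *error) {
  if (!path) {
    if (error)
      *error = 0;  // a real implementation would create a CFError here
    return false;
  }
  return true;
}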
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
new file mode 100644
index 0000000..0009e1b
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
@@ -0,0 +1,156 @@
+//=== NoReturnFunctionChecker.cpp -------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines NoReturnFunctionChecker, which evaluates functions that do not
+// return to the caller.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/StringSwitch.h"
+#include <cstdarg>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class NoReturnFunctionChecker : public Checker< check::PostStmt<CallExpr>,
+                                                check::PostObjCMessage > {
+public:
+  void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+  void checkPostObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
+};
+
+}
+
+void NoReturnFunctionChecker::checkPostStmt(const CallExpr *CE,
+                                            CheckerContext &C) const {
+  ProgramStateRef state = C.getState();
+  const Expr *Callee = CE->getCallee();
+
+  bool BuildSinks = getFunctionExtInfo(Callee->getType()).getNoReturn();
+
+  if (!BuildSinks) {
+    SVal L = state->getSVal(Callee, C.getLocationContext());
+    const FunctionDecl *FD = L.getAsFunctionDecl();
+    if (!FD)
+      return;
+
+    if (FD->getAttr<AnalyzerNoReturnAttr>() || FD->isNoReturn())
+      BuildSinks = true;
+    else if (const IdentifierInfo *II = FD->getIdentifier()) {
+      // HACK: Some functions are not marked noreturn, and don't return.
+      //  Here are a few hardwired ones.  If this takes too long, we can
+      //  potentially cache these results.
+      BuildSinks
+        = llvm::StringSwitch<bool>(StringRef(II->getName()))
+            .Case("exit", true)
+            .Case("panic", true)
+            .Case("error", true)
+            .Case("Assert", true)
+            // FIXME: This is just a wrapper around throwing an exception.
+            //  Eventually inter-procedural analysis should handle this easily.
+            .Case("ziperr", true)
+            .Case("assfail", true)
+            .Case("db_error", true)
+            .Case("__assert", true)
+            .Case("__assert_rtn", true)
+            .Case("__assert_fail", true)
+            .Case("dtrace_assfail", true)
+            .Case("yy_fatal_error", true)
+            .Case("_XCAssertionFailureHandler", true)
+            .Case("_DTAssertionFailureHandler", true)
+            .Case("_TSAssertionFailureHandler", true)
+            .Default(false);
+    }
+  }
+
+  if (BuildSinks)
+    C.generateSink();
+}
+
+static bool END_WITH_NULL isMultiArgSelector(const Selector *Sel, ...) {
+  va_list argp;
+  va_start(argp, Sel);
+
+  unsigned Slot = 0;
+  const char *Arg;
+  while ((Arg = va_arg(argp, const char *))) {
+    if (!Sel->getNameForSlot(Slot).equals(Arg))
+      break; // still need to va_end!
+    ++Slot;
+  }
+
+  va_end(argp);
+
+  // We only succeeded if we made it to the end of the argument list.
+  return (Arg == NULL);
+}
+
+void NoReturnFunctionChecker::checkPostObjCMessage(const ObjCMethodCall &Msg,
+                                                   CheckerContext &C) const {
+  // Check if the method is annotated with analyzer_noreturn.
+  if (const ObjCMethodDecl *MD = Msg.getDecl()) {
+    MD = MD->getCanonicalDecl();
+    if (MD->hasAttr<AnalyzerNoReturnAttr>()) {
+      C.generateSink();
+      return;
+    }
+  }
+
+  // HACK: This entire check is to handle two messages in the Cocoa frameworks:
+  // -[NSAssertionHandler
+  //    handleFailureInMethod:object:file:lineNumber:description:]
+  // -[NSAssertionHandler
+  //    handleFailureInFunction:file:lineNumber:description:]
+  // Eventually these should be annotated with __attribute__((noreturn)).
+  // Because ObjC messages use dynamic dispatch, it is not generally safe to
+  // assume certain methods can't return. In cases where it is definitely valid,
+  // see if you can mark the methods noreturn or analyzer_noreturn instead of
+  // adding more explicit checks to this method.
+
+  if (!Msg.isInstanceMessage())
+    return;
+
+  const ObjCInterfaceDecl *Receiver = Msg.getReceiverInterface();
+  if (!Receiver)
+    return;
+  if (!Receiver->getIdentifier()->isStr("NSAssertionHandler"))
+    return;
+
+  Selector Sel = Msg.getSelector();
+  switch (Sel.getNumArgs()) {
+  default:
+    return;
+  case 4:
+    if (!isMultiArgSelector(&Sel, "handleFailureInFunction", "file",
+                            "lineNumber", "description", NULL))
+      return;
+    break;
+  case 5:
+    if (!isMultiArgSelector(&Sel, "handleFailureInMethod", "object", "file",
+                            "lineNumber", "description", NULL))
+      return;
+    break;
+  }
+
+  // If we got here, it's one of the messages we care about.
+  C.generateSink();
+}
+
+
+void ento::registerNoReturnFunctionChecker(CheckerManager &mgr) {
+  mgr.registerChecker<NoReturnFunctionChecker>();
+}
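
The hard-wired name list above is only a fallback for unannotated code; the
usual way to get this treatment is an attribute on the declaration. A minimal
sketch with made-up function names:

// Recognized via FD->isNoReturn(): the call never returns at run time.
__attribute__((noreturn)) void fatal_error(const char *msg);

// Recognized via AnalyzerNoReturnAttr: the function may return at run time,
// but the analyzer should still treat every call to it as a sink.
__attribute__((analyzer_noreturn)) void report_assertion_failure(const char *msg);

int divide(int a, int b) {
  if (b == 0)
    fatal_error("division by zero");
  // The b == 0 path was pruned at the sink above, so no division-by-zero
  // warning is emitted for this expression.
  return a / b;
}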
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
new file mode 100644
index 0000000..273a7a3
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -0,0 +1,193 @@
+//===--- NonNullParamChecker.cpp - Undefined arguments checker -*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines NonNullParamChecker, which checks for arguments expected not to
+// be null due to:
+//   - the corresponding parameters being declared to have nonnull attribute
+//   - the corresponding parameters being references, since the call would form
+//     a reference to a null pointer
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class NonNullParamChecker
+  : public Checker< check::PreCall > {
+  mutable OwningPtr<BugType> BTAttrNonNull;
+  mutable OwningPtr<BugType> BTNullRefArg;
+public:
+
+  void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+
+  BugReport *genReportNullAttrNonNull(const ExplodedNode *ErrorN,
+                                      const Expr *ArgE) const;
+  BugReport *genReportReferenceToNullPointer(const ExplodedNode *ErrorN,
+                                             const Expr *ArgE) const;
+};
+} // end anonymous namespace
+
+void NonNullParamChecker::checkPreCall(const CallEvent &Call,
+                                      CheckerContext &C) const {
+  const Decl *FD = Call.getDecl();
+  if (!FD)
+    return;
+
+  const NonNullAttr *Att = FD->getAttr<NonNullAttr>();
+
+  ProgramStateRef state = C.getState();
+
+  CallEvent::param_type_iterator TyI = Call.param_type_begin(),
+                                 TyE = Call.param_type_end();
+
+  for (unsigned idx = 0, count = Call.getNumArgs(); idx != count; ++idx){
+
+    // Check if the parameter is a reference. We want to report when a
+    // reference to a null pointer is passed as a parameter.
+    bool haveRefTypeParam = false;
+    if (TyI != TyE) {
+      haveRefTypeParam = (*TyI)->isReferenceType();
+      TyI++;
+    }
+
+    bool haveAttrNonNull = Att && Att->isNonNull(idx);
+
+    if (!haveRefTypeParam && !haveAttrNonNull)
+      continue;
+
+    // If the value is unknown or undefined, we can't perform this check.
+    const Expr *ArgE = Call.getArgExpr(idx);
+    SVal V = Call.getArgSVal(idx);
+    Optional<DefinedSVal> DV = V.getAs<DefinedSVal>();
+    if (!DV)
+      continue;
+
+    // Process the case when the argument is not a location.
+    assert(!haveRefTypeParam || DV->getAs<Loc>());
+
+    if (haveAttrNonNull && !DV->getAs<Loc>()) {
+      // If the argument is a union type, we want to handle a potential
+      // transparent_union GCC extension.
+      if (!ArgE)
+        continue;
+
+      QualType T = ArgE->getType();
+      const RecordType *UT = T->getAsUnionType();
+      if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+        continue;
+
+      if (Optional<nonloc::CompoundVal> CSV =
+              DV->getAs<nonloc::CompoundVal>()) {
+        nonloc::CompoundVal::iterator CSV_I = CSV->begin();
+        assert(CSV_I != CSV->end());
+        V = *CSV_I;
+        DV = V.getAs<DefinedSVal>();
+        assert(++CSV_I == CSV->end());
+        if (!DV)
+          continue;
+        // Retrieve the corresponding expression.
+        if (const CompoundLiteralExpr *CE = dyn_cast<CompoundLiteralExpr>(ArgE))
+          if (const InitListExpr *IE =
+                dyn_cast<InitListExpr>(CE->getInitializer()))
+             ArgE = dyn_cast<Expr>(*(IE->begin()));
+
+      } else {
+        // FIXME: Handle LazyCompoundVals?
+        continue;
+      }
+    }
+
+    ConstraintManager &CM = C.getConstraintManager();
+    ProgramStateRef stateNotNull, stateNull;
+    llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
+
+    if (stateNull && !stateNotNull) {
+      // Generate an error node.  Check for a null node in case
+      // we cache out.
+      if (ExplodedNode *errorNode = C.generateSink(stateNull)) {
+
+        BugReport *R = 0;
+        if (haveAttrNonNull)
+          R = genReportNullAttrNonNull(errorNode, ArgE);
+        else if (haveRefTypeParam)
+          R = genReportReferenceToNullPointer(errorNode, ArgE);
+
+        // Highlight the range of the argument that was null.
+        R->addRange(Call.getArgSourceRange(idx));
+
+        // Emit the bug report.
+        C.emitReport(R);
+      }
+
+      // Always return.  Either we cached out or we just emitted an error.
+      return;
+    }
+
+    // If a pointer value passed the check we should assume that it is
+    // indeed not null from this point forward.
+    assert(stateNotNull);
+    state = stateNotNull;
+  }
+
+  // If we reach here, all of the arguments passed the nonnull check.
+  // If 'state' has been updated, generate a new node.
+  C.addTransition(state);
+}
+
+BugReport *NonNullParamChecker::genReportNullAttrNonNull(
+  const ExplodedNode *ErrorNode, const Expr *ArgE) const {
+  // Lazily allocate the BugType object if it hasn't already been
+  // created. Ownership is transferred to the BugReporter object once
+  // the BugReport is passed to 'emitReport'.
+  if (!BTAttrNonNull)
+    BTAttrNonNull.reset(new BugType(
+                            "Argument with 'nonnull' attribute passed null",
+                            "API"));
+
+  BugReport *R = new BugReport(*BTAttrNonNull,
+                  "Null pointer passed as an argument to a 'nonnull' parameter",
+                  ErrorNode);
+  if (ArgE)
+    bugreporter::trackNullOrUndefValue(ErrorNode, ArgE, *R);
+
+  return R;
+}
+
+BugReport *NonNullParamChecker::genReportReferenceToNullPointer(
+  const ExplodedNode *ErrorNode, const Expr *ArgE) const {
+  if (!BTNullRefArg)
+    BTNullRefArg.reset(new BuiltinBug("Dereference of null pointer"));
+
+  BugReport *R = new BugReport(*BTNullRefArg,
+                               "Forming reference to null pointer",
+                               ErrorNode);
+  if (ArgE) {
+    const Expr *ArgEDeref = bugreporter::getDerefExpr(ArgE);
+    if (ArgEDeref == 0)
+      ArgEDeref = ArgE;
+    bugreporter::trackNullOrUndefValue(ErrorNode,
+                                       ArgEDeref,
+                                       *R);
+  }
+  return R;
+
+}
+
+void ento::registerNonNullParamChecker(CheckerManager &mgr) {
+  mgr.registerChecker<NonNullParamChecker>();
+}
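
For illustration, a minimal hypothetical example of the two argument shapes the
checker reports; the function names are invented, and the messages in the
comments are the ones constructed above:

#include <cstddef>

// Parameter 1 is declared nonnull (attribute indices are 1-based).
__attribute__((nonnull(1))) void copyName(char *dst, size_t len);

void takesRef(int &value);

void caller(int *maybeNull) {
  copyName(NULL, 16);      // "Null pointer passed as an argument to a
                           //  'nonnull' parameter"
  if (maybeNull == NULL)
    takesRef(*maybeNull);  // "Forming reference to null pointer"
}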
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
new file mode 100644
index 0000000..4018a66
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -0,0 +1,93 @@
+//== ObjCAtSyncChecker.cpp - nil mutex checker for @synchronized -*- C++ -*--=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines ObjCAtSyncChecker, a builtin check that checks for null pointers
+// used as mutexes for @synchronized.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ObjCAtSyncChecker
+    : public Checker< check::PreStmt<ObjCAtSynchronizedStmt> > {
+  mutable OwningPtr<BuiltinBug> BT_null;
+  mutable OwningPtr<BuiltinBug> BT_undef;
+
+public:
+  void checkPreStmt(const ObjCAtSynchronizedStmt *S, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
+                                     CheckerContext &C) const {
+
+  const Expr *Ex = S->getSynchExpr();
+  ProgramStateRef state = C.getState();
+  SVal V = state->getSVal(Ex, C.getLocationContext());
+
+  // Uninitialized value used for the mutex?
+  if (V.getAs<UndefinedVal>()) {
+    if (ExplodedNode *N = C.generateSink()) {
+      if (!BT_undef)
+        BT_undef.reset(new BuiltinBug("Uninitialized value used as mutex "
+                                  "for @synchronized"));
+      BugReport *report =
+        new BugReport(*BT_undef, BT_undef->getDescription(), N);
+      bugreporter::trackNullOrUndefValue(N, Ex, *report);
+      C.emitReport(report);
+    }
+    return;
+  }
+
+  if (V.isUnknown())
+    return;
+
+  // Check for null mutexes.
+  ProgramStateRef notNullState, nullState;
+  llvm::tie(notNullState, nullState) = state->assume(V.castAs<DefinedSVal>());
+
+  if (nullState) {
+    if (!notNullState) {
+      // Generate an error node.  This isn't a sink since
+      // a null mutex just means no synchronization occurs.
+      if (ExplodedNode *N = C.addTransition(nullState)) {
+        if (!BT_null)
+          BT_null.reset(new BuiltinBug("Nil value used as mutex for @synchronized() "
+                                   "(no synchronization will occur)"));
+        BugReport *report =
+          new BugReport(*BT_null, BT_null->getDescription(), N);
+        bugreporter::trackNullOrUndefValue(N, Ex, *report);
+
+        C.emitReport(report);
+        return;
+      }
+    }
+    // Don't add a transition for 'nullState'.  If the value is
+    // under-constrained to be null or non-null, assume it is non-null
+    // afterwards.
+  }
+
+  if (notNullState)
+    C.addTransition(notNullState);
+}
+
+void ento::registerObjCAtSyncChecker(CheckerManager &mgr) {
+  if (mgr.getLangOpts().ObjC2)
+    mgr.registerChecker<ObjCAtSyncChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
new file mode 100644
index 0000000..4a0309d
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
@@ -0,0 +1,175 @@
+//== ObjCContainersASTChecker.cpp - CoreFoundation containers API *- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An AST checker that looks for common pitfalls when using the 'CFArray',
+// 'CFDictionary', and 'CFSet' APIs.
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class WalkAST : public StmtVisitor<WalkAST> {
+  BugReporter &BR;
+  AnalysisDeclContext* AC;
+  ASTContext &ASTC;
+  uint64_t PtrWidth;
+
+  /// Check if the type has pointer size (very conservative).
+  inline bool isPointerSize(const Type *T) {
+    if (!T)
+      return true;
+    if (T->isIncompleteType())
+      return true;
+    return (ASTC.getTypeSize(T) == PtrWidth);
+  }
+
+  /// Check if the type is a pointer/array to pointer sized values.
+  inline bool hasPointerToPointerSizedType(const Expr *E) {
+    QualType T = E->getType();
+
+    // The type could be either a pointer or array.
+    const Type *TP = T.getTypePtr();
+    QualType PointeeT = TP->getPointeeType();
+    if (!PointeeT.isNull()) {
+      // If the type is a pointer to an array, check the size of the array
+      // elements, to avoid false positives coming from the assumption that
+      // the values x and &x are equal when x is an array.
+      if (const Type *TElem = PointeeT->getArrayElementTypeNoTypeQual())
+        if (isPointerSize(TElem))
+          return true;
+
+      // Else, check the pointee size.
+      return isPointerSize(PointeeT.getTypePtr());
+    }
+
+    if (const Type *TElem = TP->getArrayElementTypeNoTypeQual())
+      return isPointerSize(TElem);
+
+    // The type must be an array/pointer type.
+
+    // This could be a null constant, which is allowed.
+    if (E->isNullPointerConstant(ASTC, Expr::NPC_ValueDependentIsNull))
+      return true;
+    return false;
+  }
+
+public:
+  WalkAST(BugReporter &br, AnalysisDeclContext* ac)
+  : BR(br), AC(ac), ASTC(AC->getASTContext()),
+    PtrWidth(ASTC.getTargetInfo().getPointerWidth(0)) {}
+
+  // Statement visitor methods.
+  void VisitChildren(Stmt *S);
+  void VisitStmt(Stmt *S) { VisitChildren(S); }
+  void VisitCallExpr(CallExpr *CE);
+};
+} // end anonymous namespace
+
+static StringRef getCalleeName(CallExpr *CE) {
+  const FunctionDecl *FD = CE->getDirectCallee();
+  if (!FD)
+    return StringRef();
+
+  IdentifierInfo *II = FD->getIdentifier();
+  if (!II)   // if no identifier, not a simple C function
+    return StringRef();
+
+  return II->getName();
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+  StringRef Name = getCalleeName(CE);
+  if (Name.empty())
+    return;
+
+  const Expr *Arg = 0;
+  unsigned ArgNum;
+
+  if (Name.equals("CFArrayCreate") || Name.equals("CFSetCreate")) {
+    if (CE->getNumArgs() != 4)
+      return;
+    ArgNum = 1;
+    Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
+    if (hasPointerToPointerSizedType(Arg))
+        return;
+  } else if (Name.equals("CFDictionaryCreate")) {
+    if (CE->getNumArgs() != 6)
+      return;
+    // Check first argument.
+    ArgNum = 1;
+    Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
+    if (hasPointerToPointerSizedType(Arg)) {
+      // Check second argument.
+      ArgNum = 2;
+      Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
+      if (hasPointerToPointerSizedType(Arg))
+        // Both are good, return.
+        return;
+    }
+  }
+
+  if (Arg) {
+    assert(ArgNum == 1 || ArgNum == 2);
+
+    SmallString<64> BufName;
+    llvm::raw_svector_ostream OsName(BufName);
+    OsName << " Invalid use of '" << Name << "'" ;
+
+    SmallString<256> Buf;
+    llvm::raw_svector_ostream Os(Buf);
+    // Use "second" and "third" since users will expect 1-based indexing
+    // for parameter names when mentioned in prose.
+    Os << " The "<< ((ArgNum == 1) ? "second" : "third") << " argument to '"
+        << Name << "' must be a C array of pointer-sized values, not '"
+        << Arg->getType().getAsString() << "'";
+
+    SourceRange R = Arg->getSourceRange();
+    PathDiagnosticLocation CELoc =
+        PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+    BR.EmitBasicReport(AC->getDecl(),
+                       OsName.str(), categories::CoreFoundationObjectiveC,
+                       Os.str(), CELoc, &R, 1);
+  }
+
+  // Recurse and check children.
+  VisitChildren(CE);
+}
+
+void WalkAST::VisitChildren(Stmt *S) {
+  for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+    if (Stmt *child = *I)
+      Visit(child);
+}
+
+namespace {
+class ObjCContainersASTChecker : public Checker<check::ASTCodeBody> {
+public:
+
+  void checkASTCodeBody(const Decl *D, AnalysisManager& Mgr,
+                        BugReporter &BR) const {
+    WalkAST walker(BR, Mgr.getAnalysisDeclContext(D));
+    walker.Visit(D->getBody());
+  }
+};
+}
+
+void ento::registerObjCContainersASTChecker(CheckerManager &mgr) {
+  mgr.registerChecker<ObjCContainersASTChecker>();
+}
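
A hypothetical example of the misuse this AST checker flags: passing a buffer
of non-pointer-sized values as the 'values' argument of CFArrayCreate. The
typedefs and the callbacks struct are stand-ins so the sketch is
self-contained; real code would include CoreFoundation/CFArray.h.

typedef const struct OpaqueCFAllocator *CFAllocatorRef;
typedef const struct OpaqueCFArray *CFArrayRef;
typedef long CFIndex;
typedef struct { int unused; } CFArrayCallBacks;  // stand-in only

extern "C" CFArrayRef CFArrayCreate(CFAllocatorRef allocator,
                                    const void **values, CFIndex numValues,
                                    const CFArrayCallBacks *callBacks);

CFArrayRef makeBadArray() {
  short values[] = {1, 2, 3};
  // Flagged: 'values' holds 16-bit elements, but CFArrayCreate expects a C
  // array of pointer-sized values; each element would be reinterpreted as a
  // pointer.
  return CFArrayCreate(0, (const void **)values, 3, 0);
}

CFArrayRef makeGoodArray() {
  const char *strings[] = {"a", "b", "c"};
  // Not flagged: the buffer really does hold pointer-sized elements.
  return CFArrayCreate(0, (const void **)strings, 3, 0);
}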
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
new file mode 100644
index 0000000..b9e96ee
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -0,0 +1,151 @@
+//== ObjCContainersChecker.cpp - Path sensitive checker for CFArray *- C++ -*=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Performs path sensitive checks of Core Foundation static containers like
+// CFArray.
+// 1) Check for buffer overflows:
+//      In CFArrayGetValueAtIndex(myArray, index), if the index is outside the
+//      index space of myArray (0 to N-1 inclusive, where N is the count of
+//      myArray), the behavior is undefined.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ObjCContainersChecker : public Checker< check::PreStmt<CallExpr>,
+                                             check::PostStmt<CallExpr> > {
+  mutable OwningPtr<BugType> BT;
+  inline void initBugType() const {
+    if (!BT)
+      BT.reset(new BugType("CFArray API",
+                           categories::CoreFoundationObjectiveC));
+  }
+
+  inline SymbolRef getArraySym(const Expr *E, CheckerContext &C) const {
+    SVal ArrayRef = C.getState()->getSVal(E, C.getLocationContext());
+    SymbolRef ArraySym = ArrayRef.getAsSymbol();
+    return ArraySym;
+  }
+
+  void addSizeInfo(const Expr *Array, const Expr *Size,
+                   CheckerContext &C) const;
+
+public:
+  /// A tag to identify this checker.
+  static void *getTag() { static int Tag; return &Tag; }
+
+  void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+  void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+// ProgramState trait - a map from array symbol to its state.
+REGISTER_MAP_WITH_PROGRAMSTATE(ArraySizeMap, SymbolRef, DefinedSVal)
+
+void ObjCContainersChecker::addSizeInfo(const Expr *Array, const Expr *Size,
+                                        CheckerContext &C) const {
+  ProgramStateRef State = C.getState();
+  SVal SizeV = State->getSVal(Size, C.getLocationContext());
+  // Undefined is reported by another checker.
+  if (SizeV.isUnknownOrUndef())
+    return;
+
+  // Get the ArrayRef symbol.
+  SVal ArrayRef = State->getSVal(Array, C.getLocationContext());
+  SymbolRef ArraySym = ArrayRef.getAsSymbol();
+  if (!ArraySym)
+    return;
+
+  C.addTransition(
+      State->set<ArraySizeMap>(ArraySym, SizeV.castAs<DefinedSVal>()));
+  return;
+}
+
+void ObjCContainersChecker::checkPostStmt(const CallExpr *CE,
+                                          CheckerContext &C) const {
+  StringRef Name = C.getCalleeName(CE);
+  if (Name.empty() || CE->getNumArgs() < 1)
+    return;
+
+  // Add array size information to the state.
+  if (Name.equals("CFArrayCreate")) {
+    if (CE->getNumArgs() < 3)
+      return;
+    // Note, we can visit the Create method in the post-visit because
+    // the CFIndex parameter is passed in by value and will not be invalidated
+    // by the call.
+    addSizeInfo(CE, CE->getArg(2), C);
+    return;
+  }
+
+  if (Name.equals("CFArrayGetCount")) {
+    addSizeInfo(CE->getArg(0), CE, C);
+    return;
+  }
+}
+
+void ObjCContainersChecker::checkPreStmt(const CallExpr *CE,
+                                         CheckerContext &C) const {
+  StringRef Name = C.getCalleeName(CE);
+  if (Name.empty() || CE->getNumArgs() < 2)
+    return;
+
+  // Check the array access.
+  if (Name.equals("CFArrayGetValueAtIndex")) {
+    ProgramStateRef State = C.getState();
+    // Retrieve the size.
+    // Find out if we saw this array symbol before and have information about it.
+    const Expr *ArrayExpr = CE->getArg(0);
+    SymbolRef ArraySym = getArraySym(ArrayExpr, C);
+    if (!ArraySym)
+      return;
+
+    const DefinedSVal *Size = State->get<ArraySizeMap>(ArraySym);
+
+    if (!Size)
+      return;
+
+    // Get the index.
+    const Expr *IdxExpr = CE->getArg(1);
+    SVal IdxVal = State->getSVal(IdxExpr, C.getLocationContext());
+    if (IdxVal.isUnknownOrUndef())
+      return;
+    DefinedSVal Idx = IdxVal.castAs<DefinedSVal>();
+    
+    // Now, check if 'Idx in [0, Size-1]'.
+    const QualType T = IdxExpr->getType();
+    ProgramStateRef StInBound = State->assumeInBound(Idx, *Size, true, T);
+    ProgramStateRef StOutBound = State->assumeInBound(Idx, *Size, false, T);
+    if (StOutBound && !StInBound) {
+      ExplodedNode *N = C.generateSink(StOutBound);
+      if (!N)
+        return;
+      initBugType();
+      BugReport *R = new BugReport(*BT, "Index is out of bounds", N);
+      R->addRange(IdxExpr->getSourceRange());
+      C.emitReport(R);
+      return;
+    }
+  }
+}
+
+/// Register checker.
+void ento::registerObjCContainersChecker(CheckerManager &mgr) {
+  mgr.registerChecker<ObjCContainersChecker>();
+}
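
A hypothetical example of the path-sensitive check above: the value returned by
CFArrayGetCount bounds the valid index space to [0, count-1], so indexing with
the count itself is reported. The typedefs and prototypes are stand-ins for the
real CoreFoundation declarations.

typedef const struct OpaqueCFArray *CFArrayRef;
typedef long CFIndex;

extern "C" CFIndex CFArrayGetCount(CFArrayRef theArray);
extern "C" const void *CFArrayGetValueAtIndex(CFArrayRef theArray, CFIndex idx);

const void *lastElement(CFArrayRef array) {
  CFIndex count = CFArrayGetCount(array);  // size recorded for 'array'
  // Reported as "Index is out of bounds": 'count' is one past the end.
  return CFArrayGetValueAtIndex(array, count);
}

const void *lastElementFixed(CFArrayRef array) {
  CFIndex count = CFArrayGetCount(array);
  if (count == 0)
    return 0;
  return CFArrayGetValueAtIndex(array, count - 1);  // within [0, count-1]
}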
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
new file mode 100644
index 0000000..789b9f4
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp
@@ -0,0 +1,269 @@
+//==- ObjCMissingSuperCallChecker.cpp - Check missing super-calls in ObjC --==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines an ObjCMissingSuperCallChecker, a checker that
+//  analyzes a UIViewController implementation to determine if it
+//  correctly calls super in the methods where this is mandatory.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+struct SelectorDescriptor {
+  const char *SelectorName;
+  unsigned ArgumentCount;
+};
+}
+
+//===----------------------------------------------------------------------===//
+// FindSuperCallVisitor - Identify specific calls to the superclass.
+//===----------------------------------------------------------------------===//
+
+class FindSuperCallVisitor : public RecursiveASTVisitor<FindSuperCallVisitor> {
+public:
+  explicit FindSuperCallVisitor(Selector S) : DoesCallSuper(false), Sel(S) {}
+
+  bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+    if (E->getSelector() == Sel)
+      if (E->getReceiverKind() == ObjCMessageExpr::SuperInstance)
+        DoesCallSuper = true;
+
+    // Recurse if we didn't find the super call yet.
+    return !DoesCallSuper; 
+  }
+
+  bool DoesCallSuper;
+
+private:
+  Selector Sel;
+};
+
+//===----------------------------------------------------------------------===//
+// ObjCSuperCallChecker 
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCSuperCallChecker : public Checker<
+                                      check::ASTDecl<ObjCImplementationDecl> > {
+public:
+  ObjCSuperCallChecker() : IsInitialized(false) {}
+
+  void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager &Mgr,
+                    BugReporter &BR) const;
+private:
+  bool isCheckableClass(const ObjCImplementationDecl *D,
+                        StringRef &SuperclassName) const;
+  void initializeSelectors(ASTContext &Ctx) const;
+  void fillSelectors(ASTContext &Ctx, ArrayRef<SelectorDescriptor> Sel,
+                     StringRef ClassName) const;
+  mutable llvm::StringMap<llvm::SmallSet<Selector, 16> > SelectorsForClass;
+  mutable bool IsInitialized;
+};
+
+}
+
+/// \brief Determine whether the given class has a superclass that we want
+/// to check. The name of the found superclass is stored in SuperclassName.
+///
+/// \param D The declaration to check for superclasses.
+/// \param[out] SuperclassName On return, the found superclass name.
+bool ObjCSuperCallChecker::isCheckableClass(const ObjCImplementationDecl *D,
+                                            StringRef &SuperclassName) const {
+  const ObjCInterfaceDecl *ID = D->getClassInterface();
+  for ( ; ID ; ID = ID->getSuperClass())
+  {
+    SuperclassName = ID->getIdentifier()->getName();
+    if (SelectorsForClass.count(SuperclassName))
+      return true;
+  }
+  return false;
+}
+
+void ObjCSuperCallChecker::fillSelectors(ASTContext &Ctx,
+                                         ArrayRef<SelectorDescriptor> Sel,
+                                         StringRef ClassName) const {
+  llvm::SmallSet<Selector, 16> &ClassSelectors = SelectorsForClass[ClassName];
+  // Fill the Selectors SmallSet with all selectors we want to check.
+  for (ArrayRef<SelectorDescriptor>::iterator I = Sel.begin(), E = Sel.end();
+       I != E; ++I) {
+    SelectorDescriptor Descriptor = *I;
+    assert(Descriptor.ArgumentCount <= 1); // No multi-argument selectors yet.
+
+    // Get the selector.
+    IdentifierInfo *II = &Ctx.Idents.get(Descriptor.SelectorName);
+
+    Selector Sel = Ctx.Selectors.getSelector(Descriptor.ArgumentCount, &II);
+    ClassSelectors.insert(Sel);
+  }
+}
+
+void ObjCSuperCallChecker::initializeSelectors(ASTContext &Ctx) const {
+
+  { // Initialize selectors for: UIViewController
+    const SelectorDescriptor Selectors[] = {
+      { "addChildViewController", 1 },
+      { "viewDidAppear", 1 },
+      { "viewDidDisappear", 1 },
+      { "viewWillAppear", 1 },
+      { "viewWillDisappear", 1 },
+      { "removeFromParentViewController", 0 },
+      { "didReceiveMemoryWarning", 0 },
+      { "viewDidUnload", 0 },
+      { "viewDidLoad", 0 },
+      { "viewWillUnload", 0 },
+      { "updateViewConstraints", 0 },
+      { "encodeRestorableStateWithCoder", 1 },
+      { "restoreStateWithCoder", 1 }};
+
+    fillSelectors(Ctx, Selectors, "UIViewController");
+  }
+
+  { // Initialize selectors for: UIResponder
+    const SelectorDescriptor Selectors[] = {
+      { "resignFirstResponder", 0 }};
+
+    fillSelectors(Ctx, Selectors, "UIResponder");
+  }
+
+  { // Initialize selectors for: NSResponder
+    const SelectorDescriptor Selectors[] = {
+      { "encodeRestorableStateWithCoder", 1 },
+      { "restoreStateWithCoder", 1 }};
+
+    fillSelectors(Ctx, Selectors, "NSResponder");
+  }
+
+  { // Initialize selectors for: NSDocument
+    const SelectorDescriptor Selectors[] = {
+      { "encodeRestorableStateWithCoder", 1 },
+      { "restoreStateWithCoder", 1 }};
+
+    fillSelectors(Ctx, Selectors, "NSDocument");
+  }
+
+  IsInitialized = true;
+}
+
+void ObjCSuperCallChecker::checkASTDecl(const ObjCImplementationDecl *D,
+                                        AnalysisManager &Mgr,
+                                        BugReporter &BR) const {
+  ASTContext &Ctx = BR.getContext();
+
+  // We need to initialize the selector table once.
+  if (!IsInitialized)
+    initializeSelectors(Ctx);
+
+  // Find out whether this class has a superclass that we are supposed to check.
+  StringRef SuperclassName;
+  if (!isCheckableClass(D, SuperclassName))
+    return;
+
+
+  // Iterate over all instance methods.
+  for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(),
+                                                 E = D->instmeth_end();
+       I != E; ++I) {
+    Selector S = (*I)->getSelector();
+    // Find out whether this is a selector that we want to check.
+    if (!SelectorsForClass[SuperclassName].count(S))
+      continue;
+
+    ObjCMethodDecl *MD = *I;
+
+    // Check if the method calls its superclass implementation.
+    if (MD->getBody())
+    {
+      FindSuperCallVisitor Visitor(S);
+      Visitor.TraverseDecl(MD);
+
+      // If it doesn't call super, emit a diagnostic.
+      if (!Visitor.DoesCallSuper) {
+        PathDiagnosticLocation DLoc =
+          PathDiagnosticLocation::createEnd(MD->getBody(),
+                                            BR.getSourceManager(),
+                                            Mgr.getAnalysisDeclContext(D));
+
+        const char *Name = "Missing call to superclass";
+        SmallString<320> Buf;
+        llvm::raw_svector_ostream os(Buf);
+
+        os << "The '" << S.getAsString() 
+           << "' instance method in " << SuperclassName.str() << " subclass '"
+           << *D << "' is missing a [super " << S.getAsString() << "] call";
+
+        BR.EmitBasicReport(MD, Name, categories::CoreFoundationObjectiveC,
+                           os.str(), DLoc);
+      }
+    }
+  }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Check registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerObjCSuperCallChecker(CheckerManager &Mgr) {
+  Mgr.registerChecker<ObjCSuperCallChecker>();
+}
+
+
+/*
+ ToDo list for expanding this check in the future; the list is not exhaustive.
+ There are also cases where calling super is suggested but not "mandatory".
+ In addition to being able to check the classes and methods below,
+ architectural improvements like being able to allow the super-call to be
+ done in a called method would be good too.
+
+UIDocument subclasses
+- finishedHandlingError:recovered: (is multi-arg)
+
+UIViewController subclasses
+- loadView (should *never* call super)
+- transitionFromViewController:toViewController:
+         duration:options:animations:completion: (is multi-arg)
+
+UICollectionViewController subclasses
+- loadView (take care because UIViewController subclasses should NOT call super
+            in loadView, but UICollectionViewController subclasses should)
+
+NSObject subclasses
+- doesNotRecognizeSelector (it only has to call super if it doesn't throw)
+
+UIPopoverBackgroundView subclasses (some of those are class methods)
+- arrowDirection (should *never* call super)
+- arrowOffset (should *never* call super)
+- arrowBase (should *never* call super)
+- arrowHeight (should *never* call super)
+- contentViewInsets (should *never* call super)
+
+UITextSelectionRect subclasses (some of those are properties)
+- rect (should *never* call super)
+- range (should *never* call super)
+- writingDirection (should *never* call super)
+- isVertical (should *never* call super)
+- containsStart (should *never* call super)
+- containsEnd (should *never* call super)
+*/
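
Setting the ToDo list aside, the lookup the checker performs is small. Below is
a conceptual sketch in plain C++ using std containers, where strings stand in
for Selector objects and the superclass vector stands in for walking
ObjCInterfaceDecl::getSuperClass(); it illustrates the logic of
SelectorsForClass and isCheckableClass, and is not analyzer code.

#include <map>
#include <set>
#include <string>
#include <vector>

typedef std::map<std::string, std::set<std::string> > SelectorTable;

static SelectorTable buildTable() {
  SelectorTable table;
  table["UIViewController"].insert("viewDidLoad");
  table["UIViewController"].insert("viewWillAppear:");
  table["NSDocument"].insert("restoreStateWithCoder:");
  return table;
}

// Walk the superclass chain until a class we know about is found, then ask
// whether this selector belongs to its "must call super" set.
static bool requiresSuperCall(const std::vector<std::string> &superclasses,
                              const std::string &selector,
                              const SelectorTable &table) {
  for (size_t i = 0; i < superclasses.size(); ++i) {
    SelectorTable::const_iterator it = table.find(superclasses[i]);
    if (it != table.end())
      return it->second.count(selector) != 0;
  }
  return false;
}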
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
new file mode 100644
index 0000000..8506e08
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -0,0 +1,445 @@
+//== ObjCSelfInitChecker.cpp - Checker for 'self' initialization -*- C++ -*--=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines ObjCSelfInitChecker, a builtin check that checks for uses of
+// 'self' before proper initialization.
+//
+//===----------------------------------------------------------------------===//
+
+// This checks initialization methods to verify that they assign 'self' to the
+// result of an initialization call (e.g. [super init], or [self initWith..])
+// before using 'self' or any instance variable.
+//
+// To perform the required checking, values are tagged with flags that indicate
+// 1) if the object is the one pointed to by 'self', and 2) if the object
+// is the result of an initializer (e.g. [super init]).
+//
+// Uses of an object that is true for 1) but not 2) trigger a diagnostic.
+// The uses that are currently checked are:
+//  - Using instance variables.
+//  - Returning the object.
+//
+// Note that we don't check for an invalid 'self' that is the receiver of an
+// obj-c message expression, to cut down false positives where logging
+// functions get information from self (like its class) or perform
+// "invalidation" on self when the initialization fails.
+//
+// Because the object that 'self' points to gets invalidated when a call
+// receives a reference to 'self', the checker keeps track and passes the flags
+// for 1) and 2) to the new object that 'self' points to after the call.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND);
+static bool isInitializationMethod(const ObjCMethodDecl *MD);
+static bool isInitMessage(const ObjCMethodCall &Msg);
+static bool isSelfVar(SVal location, CheckerContext &C);
+
+namespace {
+class ObjCSelfInitChecker : public Checker<  check::PostObjCMessage,
+                                             check::PostStmt<ObjCIvarRefExpr>,
+                                             check::PreStmt<ReturnStmt>,
+                                             check::PreCall,
+                                             check::PostCall,
+                                             check::Location,
+                                             check::Bind > {
+public:
+  void checkPostObjCMessage(const ObjCMethodCall &Msg, CheckerContext &C) const;
+  void checkPostStmt(const ObjCIvarRefExpr *E, CheckerContext &C) const;
+  void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
+  void checkLocation(SVal location, bool isLoad, const Stmt *S,
+                     CheckerContext &C) const;
+  void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
+
+  void checkPreCall(const CallEvent &CE, CheckerContext &C) const;
+  void checkPostCall(const CallEvent &CE, CheckerContext &C) const;
+
+  void printState(raw_ostream &Out, ProgramStateRef State,
+                  const char *NL, const char *Sep) const;
+};
+} // end anonymous namespace
+
+namespace {
+
+class InitSelfBug : public BugType {
+  const std::string desc;
+public:
+  InitSelfBug() : BugType("Missing \"self = [(super or self) init...]\"",
+                          categories::CoreFoundationObjectiveC) {}
+};
+
+} // end anonymous namespace
+
+namespace {
+enum SelfFlagEnum {
+  /// \brief No flag set.
+  SelfFlag_None = 0x0,
+  /// \brief Value came from 'self'.
+  SelfFlag_Self    = 0x1,
+  /// \brief Value came from the result of an initializer (e.g. [super init]).
+  SelfFlag_InitRes = 0x2
+};
+}
+
+REGISTER_MAP_WITH_PROGRAMSTATE(SelfFlag, SymbolRef, unsigned)
+REGISTER_TRAIT_WITH_PROGRAMSTATE(CalledInit, bool)
+
+/// \brief A call receiving a reference to 'self' invalidates the object that
+/// 'self' contains. This keeps the "self flags" assigned to the 'self'
+/// object before the call so we can assign them to the new object that 'self'
+/// points to after the call.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(PreCallSelfFlags, unsigned)
+
+static SelfFlagEnum getSelfFlags(SVal val, ProgramStateRef state) {
+  if (SymbolRef sym = val.getAsSymbol())
+    if (const unsigned *attachedFlags = state->get<SelfFlag>(sym))
+      return (SelfFlagEnum)*attachedFlags;
+  return SelfFlag_None;
+}
+
+static SelfFlagEnum getSelfFlags(SVal val, CheckerContext &C) {
+  return getSelfFlags(val, C.getState());
+}
+
+static void addSelfFlag(ProgramStateRef state, SVal val,
+                        SelfFlagEnum flag, CheckerContext &C) {
+  // We tag the symbol that the SVal wraps.
+  if (SymbolRef sym = val.getAsSymbol()) {
+    state = state->set<SelfFlag>(sym, getSelfFlags(val, state) | flag);
+    C.addTransition(state);
+  }
+}
+
+static bool hasSelfFlag(SVal val, SelfFlagEnum flag, CheckerContext &C) {
+  return getSelfFlags(val, C) & flag;
+}
+
+/// \brief Returns true if the value of the expression is the object that
+/// 'self' points to and is an object that did not come from the result of
+/// calling an initializer.
+static bool isInvalidSelf(const Expr *E, CheckerContext &C) {
+  SVal exprVal = C.getState()->getSVal(E, C.getLocationContext());
+  if (!hasSelfFlag(exprVal, SelfFlag_Self, C))
+    return false; // value did not come from 'self'.
+  if (hasSelfFlag(exprVal, SelfFlag_InitRes, C))
+    return false; // 'self' is properly initialized.
+
+  return true;
+}
+
+static void checkForInvalidSelf(const Expr *E, CheckerContext &C,
+                                const char *errorStr) {
+  if (!E)
+    return;
+  
+  if (!C.getState()->get<CalledInit>())
+    return;
+  
+  if (!isInvalidSelf(E, C))
+    return;
+  
+  // Generate an error node.
+  ExplodedNode *N = C.generateSink();
+  if (!N)
+    return;
+
+  BugReport *report =
+    new BugReport(*new InitSelfBug(), errorStr, N);
+  C.emitReport(report);
+}
+
+void ObjCSelfInitChecker::checkPostObjCMessage(const ObjCMethodCall &Msg,
+                                               CheckerContext &C) const {
+  // When encountering a message that does initialization (init rule),
+  // tag the return value so that we know later on that if self has this value
+  // then it is properly initialized.
+
+  // FIXME: A callback should disable checkers at the start of functions.
+  if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+                                C.getCurrentAnalysisDeclContext()->getDecl())))
+    return;
+
+  if (isInitMessage(Msg)) {
+    // Tag the return value as the result of an initializer.
+    ProgramStateRef state = C.getState();
+    
+    // FIXME this really should be context sensitive, where we record
+    // the current stack frame (for IPA).  Also, we need to clean this
+    // value out when we return from this method.
+    state = state->set<CalledInit>(true);
+    
+    SVal V = state->getSVal(Msg.getOriginExpr(), C.getLocationContext());
+    addSelfFlag(state, V, SelfFlag_InitRes, C);
+    return;
+  }
+
+  // We don't check for an invalid 'self' in an obj-c message expression, to
+  // cut down false positives where logging functions get information from
+  // self (like its class) or perform "invalidation" on self when the
+  // initialization fails.
+}
+
+void ObjCSelfInitChecker::checkPostStmt(const ObjCIvarRefExpr *E,
+                                        CheckerContext &C) const {
+  // FIXME: A callback should disable checkers at the start of functions.
+  if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+                                 C.getCurrentAnalysisDeclContext()->getDecl())))
+    return;
+
+  checkForInvalidSelf(E->getBase(), C,
+    "Instance variable used while 'self' is not set to the result of "
+                                                 "'[(super or self) init...]'");
+}
+
+void ObjCSelfInitChecker::checkPreStmt(const ReturnStmt *S,
+                                       CheckerContext &C) const {
+  // FIXME: A callback should disable checkers at the start of functions.
+  if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+                                 C.getCurrentAnalysisDeclContext()->getDecl())))
+    return;
+
+  checkForInvalidSelf(S->getRetValue(), C,
+    "Returning 'self' while it is not set to the result of "
+                                                 "'[(super or self) init...]'");
+}
+
+// When a call receives a reference to 'self', [Pre/Post]Call pass
+// the SelfFlags from the object 'self' points to before the call to the new
+// object after the call. This is to avoid invalidation of 'self' by logging
+// functions.
+// Another common pattern in classes with multiple initializers is to put the
+// subclass's common initialization bits into a static function that receives
+// the value of 'self', e.g:
+// @code
+//   if (!(self = [super init]))
+//     return nil;
+//   if (!(self = _commonInit(self)))
+//     return nil;
+// @endcode
+// Until we can use inter-procedural analysis, in such a call, transfer the
+// SelfFlags to the result of the call.
+
+void ObjCSelfInitChecker::checkPreCall(const CallEvent &CE,
+                                       CheckerContext &C) const {
+  // FIXME: A callback should disable checkers at the start of functions.
+  if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+                                 C.getCurrentAnalysisDeclContext()->getDecl())))
+    return;
+
+  ProgramStateRef state = C.getState();
+  unsigned NumArgs = CE.getNumArgs();
+  // If we passed 'self' as an argument to the call, record it in the state
+  // to be propagated after the call.
+  // Note, we could have just given up, but try to be more optimistic here and
+  // assume that the functions are going to continue initialization or will not
+  // modify self.
+  for (unsigned i = 0; i < NumArgs; ++i) {
+    SVal argV = CE.getArgSVal(i);
+    if (isSelfVar(argV, C)) {
+      unsigned selfFlags = getSelfFlags(state->getSVal(argV.castAs<Loc>()), C);
+      C.addTransition(state->set<PreCallSelfFlags>(selfFlags));
+      return;
+    } else if (hasSelfFlag(argV, SelfFlag_Self, C)) {
+      unsigned selfFlags = getSelfFlags(argV, C);
+      C.addTransition(state->set<PreCallSelfFlags>(selfFlags));
+      return;
+    }
+  }
+}
+
+void ObjCSelfInitChecker::checkPostCall(const CallEvent &CE,
+                                        CheckerContext &C) const {
+  // FIXME: A callback should disable checkers at the start of functions.
+  if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+                                 C.getCurrentAnalysisDeclContext()->getDecl())))
+    return;
+
+  ProgramStateRef state = C.getState();
+  SelfFlagEnum prevFlags = (SelfFlagEnum)state->get<PreCallSelfFlags>();
+  if (!prevFlags)
+    return;
+  state = state->remove<PreCallSelfFlags>();
+
+  unsigned NumArgs = CE.getNumArgs();
+  for (unsigned i = 0; i < NumArgs; ++i) {
+    SVal argV = CE.getArgSVal(i);
+    if (isSelfVar(argV, C)) {
+      // If the address of 'self' is being passed to the call, assume that the
+      // 'self' after the call will have the same flags.
+      // EX: log(&self)
+      addSelfFlag(state, state->getSVal(argV.castAs<Loc>()), prevFlags, C);
+      return;
+    } else if (hasSelfFlag(argV, SelfFlag_Self, C)) {
+      // If 'self' is passed to the call by value, assume that the function
+      // returns 'self'. So assign the flags, which were set on 'self' to the
+      // return value.
+      // EX: self = performMoreInitialization(self)
+      addSelfFlag(state, CE.getReturnValue(), prevFlags, C);
+      return;
+    }
+  }
+
+  C.addTransition(state);
+}
+
+void ObjCSelfInitChecker::checkLocation(SVal location, bool isLoad,
+                                        const Stmt *S,
+                                        CheckerContext &C) const {
+  if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+        C.getCurrentAnalysisDeclContext()->getDecl())))
+    return;
+
+  // Tag the result of a load from 'self' so that we can easily know that the
+  // value is the object that 'self' points to.
+  ProgramStateRef state = C.getState();
+  if (isSelfVar(location, C))
+    addSelfFlag(state, state->getSVal(location.castAs<Loc>()), SelfFlag_Self,
+                C);
+}
+
+
+void ObjCSelfInitChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+                                    CheckerContext &C) const {
+  // Allow assignment of anything to self. Self is a local variable in the
+  // initializer, so it is legal to assign anything to it, like results of
+  // static functions/method calls. After self is assigned something we cannot 
+  // reason about, stop enforcing the rules.
+  // (Only continue checking if the assigned value should be treated as self.)
+  if ((isSelfVar(loc, C)) &&
+      !hasSelfFlag(val, SelfFlag_InitRes, C) &&
+      !hasSelfFlag(val, SelfFlag_Self, C) &&
+      !isSelfVar(val, C)) {
+
+    // Stop tracking the checker-specific state in the state.
+    ProgramStateRef State = C.getState();
+    State = State->remove<CalledInit>();
+    if (SymbolRef sym = loc.getAsSymbol())
+      State = State->remove<SelfFlag>(sym);
+    C.addTransition(State);
+  }
+}
+
+void ObjCSelfInitChecker::printState(raw_ostream &Out, ProgramStateRef State,
+                                     const char *NL, const char *Sep) const {
+  SelfFlagTy FlagMap = State->get<SelfFlag>();
+  bool DidCallInit = State->get<CalledInit>();
+  SelfFlagEnum PreCallFlags = (SelfFlagEnum)State->get<PreCallSelfFlags>();
+
+  if (FlagMap.isEmpty() && !DidCallInit && !PreCallFlags)
+    return;
+
+  Out << Sep << NL << "ObjCSelfInitChecker:" << NL;
+
+  if (DidCallInit)
+    Out << "  An init method has been called." << NL;
+
+  if (PreCallFlags != SelfFlag_None) {
+    if (PreCallFlags & SelfFlag_Self) {
+      Out << "  An argument of the current call came from the 'self' variable."
+          << NL;
+    }
+    if (PreCallFlags & SelfFlag_InitRes) {
+      Out << "  An argument of the current call came from an init method."
+          << NL;
+    }
+  }
+
+  Out << NL;
+  for (SelfFlagTy::iterator I = FlagMap.begin(), E = FlagMap.end();
+       I != E; ++I) {
+    Out << I->first << " : ";
+
+    if (I->second == SelfFlag_None)
+      Out << "none";
+
+    if (I->second & SelfFlag_Self)
+      Out << "self variable";
+
+    if (I->second & SelfFlag_InitRes) {
+      if (I->second != SelfFlag_InitRes)
+        Out << " | ";
+      Out << "result of init method";
+    }
+
+    Out << NL;
+  }
+}
+
+
+// FIXME: A callback should disable checkers at the start of functions.
+static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND) {
+  if (!ND)
+    return false;
+
+  const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(ND);
+  if (!MD)
+    return false;
+  if (!isInitializationMethod(MD))
+    return false;
+
+  // self = [super init] applies only to NSObject subclasses.
+  // For instance, NSProxy doesn't implement -init.
+  ASTContext &Ctx = MD->getASTContext();
+  IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
+  ObjCInterfaceDecl *ID = MD->getClassInterface()->getSuperClass();
+  for ( ; ID ; ID = ID->getSuperClass()) {
+    IdentifierInfo *II = ID->getIdentifier();
+
+    if (II == NSObjectII)
+      break;
+  }
+  if (!ID)
+    return false;
+
+  return true;
+}
+
+/// \brief Returns true if the location is 'self'.
+static bool isSelfVar(SVal location, CheckerContext &C) {
+  AnalysisDeclContext *analCtx = C.getCurrentAnalysisDeclContext(); 
+  if (!analCtx->getSelfDecl())
+    return false;
+  if (!location.getAs<loc::MemRegionVal>())
+    return false;
+
+  loc::MemRegionVal MRV = location.castAs<loc::MemRegionVal>();
+  if (const DeclRegion *DR = dyn_cast<DeclRegion>(MRV.stripCasts()))
+    return (DR->getDecl() == analCtx->getSelfDecl());
+
+  return false;
+}
+
+static bool isInitializationMethod(const ObjCMethodDecl *MD) {
+  return MD->getMethodFamily() == OMF_init;
+}
+
+static bool isInitMessage(const ObjCMethodCall &Call) {
+  return Call.getMethodFamily() == OMF_init;
+}
+
+//===----------------------------------------------------------------------===//
+// Registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerObjCSelfInitChecker(CheckerManager &mgr) {
+  mgr.registerChecker<ObjCSelfInitChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
new file mode 100644
index 0000000..c66c7d0
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -0,0 +1,197 @@
+//==- ObjCUnusedIVarsChecker.cpp - Check for unused ivars --------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines CheckObjCUnusedIvars, a checker that analyzes an
+//  Objective-C class's interface/implementation to determine if it has any
+//  ivars that are never accessed.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+
+using namespace clang;
+using namespace ento;
+
+enum IVarState { Unused, Used };
+typedef llvm::DenseMap<const ObjCIvarDecl*,IVarState> IvarUsageMap;
+
+static void Scan(IvarUsageMap& M, const Stmt *S) {
+  if (!S)
+    return;
+
+  if (const ObjCIvarRefExpr *Ex = dyn_cast<ObjCIvarRefExpr>(S)) {
+    const ObjCIvarDecl *D = Ex->getDecl();
+    IvarUsageMap::iterator I = M.find(D);
+    if (I != M.end())
+      I->second = Used;
+    return;
+  }
+
+  // Blocks can reference an instance variable of a class.
+  if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+    Scan(M, BE->getBody());
+    return;
+  }
+
+  if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(S))
+    for (PseudoObjectExpr::const_semantics_iterator
+        i = POE->semantics_begin(), e = POE->semantics_end(); i != e; ++i) {
+      const Expr *sub = *i;
+      if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(sub))
+        sub = OVE->getSourceExpr();
+      Scan(M, sub);
+    }
+
+  for (Stmt::const_child_iterator I=S->child_begin(),E=S->child_end(); I!=E;++I)
+    Scan(M, *I);
+}
+
+static void Scan(IvarUsageMap& M, const ObjCPropertyImplDecl *D) {
+  if (!D)
+    return;
+
+  const ObjCIvarDecl *ID = D->getPropertyIvarDecl();
+
+  if (!ID)
+    return;
+
+  IvarUsageMap::iterator I = M.find(ID);
+  if (I != M.end())
+    I->second = Used;
+}
+
+static void Scan(IvarUsageMap& M, const ObjCContainerDecl *D) {
+  // Scan the methods for accesses.
+  for (ObjCContainerDecl::instmeth_iterator I = D->instmeth_begin(),
+       E = D->instmeth_end(); I!=E; ++I)
+    Scan(M, (*I)->getBody());
+
+  if (const ObjCImplementationDecl *ID = dyn_cast<ObjCImplementationDecl>(D)) {
+    // Scan for @synthesized property methods that act as setters/getters
+    // to an ivar.
+    for (ObjCImplementationDecl::propimpl_iterator I = ID->propimpl_begin(),
+         E = ID->propimpl_end(); I!=E; ++I)
+      Scan(M, *I);
+
+    // Scan the associated categories as well.
+    for (ObjCInterfaceDecl::visible_categories_iterator
+           Cat = ID->getClassInterface()->visible_categories_begin(),
+           CatEnd = ID->getClassInterface()->visible_categories_end();
+         Cat != CatEnd; ++Cat) {
+      if (const ObjCCategoryImplDecl *CID = Cat->getImplementation())
+        Scan(M, CID);
+    }
+  }
+}
+
+static void Scan(IvarUsageMap &M, const DeclContext *C, const FileID FID,
+                 SourceManager &SM) {
+  for (DeclContext::decl_iterator I=C->decls_begin(), E=C->decls_end();
+       I!=E; ++I)
+    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
+      SourceLocation L = FD->getLocStart();
+      if (SM.getFileID(L) == FID)
+        Scan(M, FD->getBody());
+    }
+}
+
+static void checkObjCUnusedIvar(const ObjCImplementationDecl *D,
+                                BugReporter &BR) {
+
+  const ObjCInterfaceDecl *ID = D->getClassInterface();
+  IvarUsageMap M;
+
+  // Iterate over the ivars.
+  for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(),
+        E=ID->ivar_end(); I!=E; ++I) {
+
+    const ObjCIvarDecl *ID = *I;
+
+    // Ignore ivars that:
+    // (a) aren't private
+    // (b) are explicitly marked unused
+    // (c) are IBOutlets
+    // (d) are unnamed bitfields
+    if (ID->getAccessControl() != ObjCIvarDecl::Private ||
+        ID->getAttr<UnusedAttr>() || ID->getAttr<IBOutletAttr>() ||
+        ID->getAttr<IBOutletCollectionAttr>() ||
+        ID->isUnnamedBitfield())
+      continue;
+
+    M[ID] = Unused;
+  }
+
+  if (M.empty())
+    return;
+
+  // Now scan the implementation declaration.
+  Scan(M, D);
+
+  // Any potentially unused ivars?
+  bool hasUnused = false;
+  for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
+    if (I->second == Unused) {
+      hasUnused = true;
+      break;
+    }
+
+  if (!hasUnused)
+    return;
+
+  // We found some potentially unused ivars.  Scan the entire translation unit
+  // for functions inside the @implementation that reference these ivars.
+  // FIXME: In the future hopefully we can just use the lexical DeclContext
+  // to go from the ObjCImplementationDecl to the lexically "nested"
+  // C functions.
+  SourceManager &SM = BR.getSourceManager();
+  Scan(M, D->getDeclContext(), SM.getFileID(D->getLocation()), SM);
+
+  // Find ivars that are unused.
+  for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
+    if (I->second == Unused) {
+      std::string sbuf;
+      llvm::raw_string_ostream os(sbuf);
+      os << "Instance variable '" << *I->first << "' in class '" << *ID
+         << "' is never used by the methods in its @implementation "
+            "(although it may be used by category methods).";
+
+      PathDiagnosticLocation L =
+        PathDiagnosticLocation::create(I->first, BR.getSourceManager());
+      BR.EmitBasicReport(D, "Unused instance variable", "Optimization",
+                         os.str(), L);
+    }
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCUnusedIvarsChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCUnusedIvarsChecker : public Checker<
+                                      check::ASTDecl<ObjCImplementationDecl> > {
+public:
+  void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
+                    BugReporter &BR) const {
+    checkObjCUnusedIvar(D, BR);
+  }
+};
+}
+
+void ento::registerObjCUnusedIvarsChecker(CheckerManager &mgr) {
+  mgr.registerChecker<ObjCUnusedIvarsChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
new file mode 100644
index 0000000..bcbfacd
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -0,0 +1,69 @@
+//=== PointerArithChecker.cpp - Pointer arithmetic checker -----*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PointerArithChecker, a builtin checker that checks for
+// pointer arithmetic on locations other than array elements.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class PointerArithChecker 
+  : public Checker< check::PreStmt<BinaryOperator> > {
+  mutable OwningPtr<BuiltinBug> BT;
+
+public:
+  void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+};
+}
+
+void PointerArithChecker::checkPreStmt(const BinaryOperator *B,
+                                       CheckerContext &C) const {
+  if (B->getOpcode() != BO_Sub && B->getOpcode() != BO_Add)
+    return;
+
+  ProgramStateRef state = C.getState();
+  const LocationContext *LCtx = C.getLocationContext();
+  SVal LV = state->getSVal(B->getLHS(), LCtx);
+  SVal RV = state->getSVal(B->getRHS(), LCtx);
+
+  const MemRegion *LR = LV.getAsRegion();
+
+  if (!LR || !RV.isConstant())
+    return;
+
+  // If pointer arithmetic is done on variables of non-array type, it often
+  // means the behavior relies on memory layout, which is dangerous.
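+  // EX (hypothetical): int x; int *p = &x; p = p + 1;
+  // Here 'p + 1' performs arithmetic on a variable that is not an array.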
+  if (isa<VarRegion>(LR) || isa<CodeTextRegion>(LR) || 
+      isa<CompoundLiteralRegion>(LR)) {
+
+    if (ExplodedNode *N = C.addTransition()) {
+      if (!BT)
+        BT.reset(new BuiltinBug("Dangerous pointer arithmetic",
+                            "Pointer arithmetic done on non-array variables "
+                            "means reliance on memory layout, which is "
+                            "dangerous."));
+      BugReport *R = new BugReport(*BT, BT->getDescription(), N);
+      R->addRange(B->getSourceRange());
+      C.emitReport(R);
+    }
+  }
+}
+
+void ento::registerPointerArithChecker(CheckerManager &mgr) {
+  mgr.registerChecker<PointerArithChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
new file mode 100644
index 0000000..07c82d4
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -0,0 +1,76 @@
+//=== PointerSubChecker.cpp - Pointer subtraction checker ------*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PointerSubChecker, a builtin checker that checks for
+// subtraction of two pointers that point to different memory chunks.
+// This check corresponds to CWE-469.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class PointerSubChecker 
+  : public Checker< check::PreStmt<BinaryOperator> > {
+  mutable OwningPtr<BuiltinBug> BT;
+
+public:
+  void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+};
+}
+
+void PointerSubChecker::checkPreStmt(const BinaryOperator *B,
+                                     CheckerContext &C) const {
+  // When doing pointer subtraction, if the two pointers do not point to the
+  // same memory chunk, emit a warning.
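+  // EX (hypothetical): int a[8], b[8]; ptrdiff_t d = &a[4] - &b[2];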
+  if (B->getOpcode() != BO_Sub)
+    return;
+
+  ProgramStateRef state = C.getState();
+  const LocationContext *LCtx = C.getLocationContext();
+  SVal LV = state->getSVal(B->getLHS(), LCtx);
+  SVal RV = state->getSVal(B->getRHS(), LCtx);
+
+  const MemRegion *LR = LV.getAsRegion();
+  const MemRegion *RR = RV.getAsRegion();
+
+  if (!(LR && RR))
+    return;
+
+  const MemRegion *BaseLR = LR->getBaseRegion();
+  const MemRegion *BaseRR = RR->getBaseRegion();
+
+  if (BaseLR == BaseRR)
+    return;
+
+  // Allow arithmetic on different symbolic regions.
+  if (isa<SymbolicRegion>(BaseLR) || isa<SymbolicRegion>(BaseRR))
+    return;
+
+  if (ExplodedNode *N = C.addTransition()) {
+    if (!BT)
+      BT.reset(new BuiltinBug("Pointer subtraction", 
+                          "Subtraction of two pointers that do not point to "
+                          "the same memory chunk may cause incorrect result."));
+    BugReport *R = new BugReport(*BT, BT->getDescription(), N);
+    R->addRange(B->getSourceRange());
+    C.emitReport(R);
+  }
+}
+
+void ento::registerPointerSubChecker(CheckerManager &mgr) {
+  mgr.registerChecker<PointerSubChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
new file mode 100644
index 0000000..ffb8cf2
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -0,0 +1,190 @@
+//===--- PthreadLockChecker.cpp - Check for locking problems ---*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines PthreadLockChecker, a simple lock -> unlock checker.
+// Also handles XNU locks, which behave similarly enough to share code.
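+// For example, acquiring the same lock twice along a path is reported as
+// double locking:
+//   pthread_mutex_lock(&mu);
+//   pthread_mutex_lock(&mu);  // warning: this lock has already been acquired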
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/ImmutableList.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class PthreadLockChecker : public Checker< check::PostStmt<CallExpr> > {
+  mutable OwningPtr<BugType> BT_doublelock;
+  mutable OwningPtr<BugType> BT_lor;
+  enum LockingSemantics {
+    NotApplicable = 0,
+    PthreadSemantics,
+    XNUSemantics
+  };
+public:
+  void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+    
+  void AcquireLock(CheckerContext &C, const CallExpr *CE, SVal lock,
+                   bool isTryLock, enum LockingSemantics semantics) const;
+    
+  void ReleaseLock(CheckerContext &C, const CallExpr *CE, SVal lock) const;
+};
+} // end anonymous namespace
+
+// GDM Entry for tracking lock state.
+REGISTER_LIST_WITH_PROGRAMSTATE(LockSet, const MemRegion *)
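+// The LockSet behaves as a stack of acquired lock regions: the head of the
+// list is the most recently acquired lock, which ReleaseLock compares against
+// to detect lock order reversals.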
+
+
+void PthreadLockChecker::checkPostStmt(const CallExpr *CE,
+                                       CheckerContext &C) const {
+  ProgramStateRef state = C.getState();
+  const LocationContext *LCtx = C.getLocationContext();
+  StringRef FName = C.getCalleeName(CE);
+  if (FName.empty())
+    return;
+
+  if (CE->getNumArgs() != 1)
+    return;
+
+  if (FName == "pthread_mutex_lock" ||
+      FName == "pthread_rwlock_rdlock" ||
+      FName == "pthread_rwlock_wrlock")
+    AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+                false, PthreadSemantics);
+  else if (FName == "lck_mtx_lock" ||
+           FName == "lck_rw_lock_exclusive" ||
+           FName == "lck_rw_lock_shared") 
+    AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+                false, XNUSemantics);
+  else if (FName == "pthread_mutex_trylock" ||
+           FName == "pthread_rwlock_tryrdlock" ||
+           FName == "pthread_rwlock_tryrwlock")
+    AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+                true, PthreadSemantics);
+  else if (FName == "lck_mtx_try_lock" ||
+           FName == "lck_rw_try_lock_exclusive" ||
+           FName == "lck_rw_try_lock_shared")
+    AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+                true, XNUSemantics);
+  else if (FName == "pthread_mutex_unlock" ||
+           FName == "pthread_rwlock_unlock" ||
+           FName == "lck_mtx_unlock" ||
+           FName == "lck_rw_done")
+    ReleaseLock(C, CE, state->getSVal(CE->getArg(0), LCtx));
+}
+
+void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
+                                     SVal lock, bool isTryLock,
+                                     enum LockingSemantics semantics) const {
+  
+  const MemRegion *lockR = lock.getAsRegion();
+  if (!lockR)
+    return;
+  
+  ProgramStateRef state = C.getState();
+  
+  SVal X = state->getSVal(CE, C.getLocationContext());
+  if (X.isUnknownOrUndef())
+    return;
+  
+  DefinedSVal retVal = X.castAs<DefinedSVal>();
+
+  if (state->contains<LockSet>(lockR)) {
+    if (!BT_doublelock)
+      BT_doublelock.reset(new BugType("Double locking", "Lock checker"));
+    ExplodedNode *N = C.generateSink();
+    if (!N)
+      return;
+    BugReport *report = new BugReport(*BT_doublelock,
+                                                      "This lock has already "
+                                                      "been acquired", N);
+    report->addRange(CE->getArg(0)->getSourceRange());
+    C.emitReport(report);
+    return;
+  }
+
+  ProgramStateRef lockSucc = state;
+  if (isTryLock) {
+    // Bifurcate the state, and allow a mode where the lock acquisition fails.
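+    // pthread try-lock functions return 0 (false) on success, whereas the XNU
+    // lck_*_try_lock functions return nonzero (true) on success, hence the
+    // opposite orderings of the assume() results below.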
+    ProgramStateRef lockFail;
+    switch (semantics) {
+    case PthreadSemantics:
+      llvm::tie(lockFail, lockSucc) = state->assume(retVal);    
+      break;
+    case XNUSemantics:
+      llvm::tie(lockSucc, lockFail) = state->assume(retVal);    
+      break;
+    default:
+      llvm_unreachable("Unknown tryLock locking semantics");
+    }
+    assert(lockFail && lockSucc);
+    C.addTransition(lockFail);
+
+  } else if (semantics == PthreadSemantics) {
+    // Assume that the return value was 0.
+    lockSucc = state->assume(retVal, false);
+    assert(lockSucc);
+
+  } else {
+    // XNU locking semantics return void on non-try locks
+    assert((semantics == XNUSemantics) && "Unknown locking semantics");
+    lockSucc = state;
+  }
+  
+  // Record that the lock was acquired.  
+  lockSucc = lockSucc->add<LockSet>(lockR);
+  C.addTransition(lockSucc);
+}
+
+void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
+                                     SVal lock) const {
+
+  const MemRegion *lockR = lock.getAsRegion();
+  if (!lockR)
+    return;
+  
+  ProgramStateRef state = C.getState();
+  LockSetTy LS = state->get<LockSet>();
+
+  // FIXME: Better analysis requires IPA for wrappers.
+  // FIXME: check for double unlocks
+  if (LS.isEmpty())
+    return;
+  
+  const MemRegion *firstLockR = LS.getHead();
+  if (firstLockR != lockR) {
+    if (!BT_lor)
+      BT_lor.reset(new BugType("Lock order reversal", "Lock checker"));
+    ExplodedNode *N = C.generateSink();
+    if (!N)
+      return;
+    BugReport *report = new BugReport(*BT_lor,
+                                                      "This was not the most "
+                                                      "recently acquired lock. "
+                                                      "Possible lock order "
+                                                      "reversal", N);
+    report->addRange(CE->getArg(0)->getSourceRange());
+    C.emitReport(report);
+    return;
+  }
+
+  // Record that the lock was released. 
+  state = state->set<LockSet>(LS.getTail());
+  C.addTransition(state);
+}
+
+
+void ento::registerPthreadLockChecker(CheckerManager &mgr) {
+  mgr.registerChecker<PthreadLockChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
new file mode 100644
index 0000000..0f456ea
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
@@ -0,0 +1,3749 @@
+//==-- RetainCountChecker.cpp - Checks for leaks and other issues -*- C++ -*--//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the methods for RetainCountChecker, which implements
+//  a reference count checker for Core Foundation and Cocoa (on Mac OS X).
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include <cstdarg>
+
+#include "AllocationDiagnostics.h"
+
+using namespace clang;
+using namespace ento;
+using llvm::StrInStrNoCase;
+
+//===----------------------------------------------------------------------===//
+// Primitives used for constructing summaries for function/method calls.
+//===----------------------------------------------------------------------===//
+
+/// ArgEffect is used to summarize a function/method call's effect on a
+/// particular argument.
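+/// For example, CFRelease is summarized with a DecRef effect on argument 0.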
+enum ArgEffect { DoNothing, Autorelease, Dealloc, DecRef, DecRefMsg,
+                 DecRefBridgedTransfered,
+                 IncRefMsg, IncRef, MakeCollectable, MayEscape,
+
+                 // Stop tracking the argument - the effect of the call is
+                 // unknown.
+                 StopTracking,
+
+                 // In some cases, we obtain a better summary for this checker
+                 // by looking at the call site than by inlining the function.
+                 // Signifies that we should stop tracking the symbol even if
+                 // the function is inlined.
+                 StopTrackingHard,
+
+                 // The function decrements the reference count and the checker 
+                 // should stop tracking the argument.
+                 DecRefAndStopTrackingHard, DecRefMsgAndStopTrackingHard
+               };
+
+namespace llvm {
+template <> struct FoldingSetTrait<ArgEffect> {
+static inline void Profile(const ArgEffect X, FoldingSetNodeID& ID) {
+  ID.AddInteger((unsigned) X);
+}
+};
+} // end llvm namespace
+
+/// ArgEffects summarizes the effects of a function/method call on all of
+/// its arguments.
+typedef llvm::ImmutableMap<unsigned,ArgEffect> ArgEffects;
+
+namespace {
+
+///  RetEffect is used to summarize a function/method call's behavior with
+///  respect to its return value.
+class RetEffect {
+public:
+  enum Kind { NoRet, OwnedSymbol, OwnedAllocatedSymbol,
+              NotOwnedSymbol, GCNotOwnedSymbol, ARCNotOwnedSymbol,
+              OwnedWhenTrackedReceiver,
+              // Treat this function as returning a non-tracked symbol even if
+              // the function has been inlined. This is used where the call
+              // site summary is more precise than the summary indirectly
+              // produced by inlining the function.
+              NoRetHard
+            };
+
+  enum ObjKind { CF, ObjC, AnyObj };
+
+private:
+  Kind K;
+  ObjKind O;
+
+  RetEffect(Kind k, ObjKind o = AnyObj) : K(k), O(o) {}
+
+public:
+  Kind getKind() const { return K; }
+
+  ObjKind getObjKind() const { return O; }
+
+  bool isOwned() const {
+    return K == OwnedSymbol || K == OwnedAllocatedSymbol ||
+           K == OwnedWhenTrackedReceiver;
+  }
+
+  bool operator==(const RetEffect &Other) const {
+    return K == Other.K && O == Other.O;
+  }
+
+  static RetEffect MakeOwnedWhenTrackedReceiver() {
+    return RetEffect(OwnedWhenTrackedReceiver, ObjC);
+  }
+
+  static RetEffect MakeOwned(ObjKind o, bool isAllocated = false) {
+    return RetEffect(isAllocated ? OwnedAllocatedSymbol : OwnedSymbol, o);
+  }
+  static RetEffect MakeNotOwned(ObjKind o) {
+    return RetEffect(NotOwnedSymbol, o);
+  }
+  static RetEffect MakeGCNotOwned() {
+    return RetEffect(GCNotOwnedSymbol, ObjC);
+  }
+  static RetEffect MakeARCNotOwned() {
+    return RetEffect(ARCNotOwnedSymbol, ObjC);
+  }
+  static RetEffect MakeNoRet() {
+    return RetEffect(NoRet);
+  }
+  static RetEffect MakeNoRetHard() {
+    return RetEffect(NoRetHard);
+  }
+
+  void Profile(llvm::FoldingSetNodeID& ID) const {
+    ID.AddInteger((unsigned) K);
+    ID.AddInteger((unsigned) O);
+  }
+};
+
+//===----------------------------------------------------------------------===//
+// Reference-counting logic (typestate + counts).
+//===----------------------------------------------------------------------===//
+
+class RefVal {
+public:
+  enum Kind {
+    Owned = 0, // Owning reference.
+    NotOwned,  // Reference is not owned but still valid (not freed).
+    Released,  // Object has been released.
+    ReturnedOwned, // Returned object passes ownership to caller.
+    ReturnedNotOwned, // Returned object does not pass ownership to caller.
+    ERROR_START,
+    ErrorDeallocNotOwned, // -dealloc called on non-owned object.
+    ErrorDeallocGC, // Calling -dealloc with GC enabled.
+    ErrorUseAfterRelease, // Object used after released.
+    ErrorReleaseNotOwned, // Release of an object that was not owned.
+    ERROR_LEAK_START,
+    ErrorLeak,  // A memory leak due to excessive reference counts.
+    ErrorLeakReturned, // A memory leak due to the returning method not having
+                       // the correct naming conventions.
+    ErrorGCLeakReturned,
+    ErrorOverAutorelease,
+    ErrorReturnedNotOwned
+  };
+
+private:
+  Kind kind;
+  RetEffect::ObjKind okind;
+  unsigned Cnt;
+  unsigned ACnt;
+  QualType T;
+
+  RefVal(Kind k, RetEffect::ObjKind o, unsigned cnt, unsigned acnt, QualType t)
+  : kind(k), okind(o), Cnt(cnt), ACnt(acnt), T(t) {}
+
+public:
+  Kind getKind() const { return kind; }
+
+  RetEffect::ObjKind getObjKind() const { return okind; }
+
+  unsigned getCount() const { return Cnt; }
+  unsigned getAutoreleaseCount() const { return ACnt; }
+  unsigned getCombinedCounts() const { return Cnt + ACnt; }
+  void clearCounts() { Cnt = 0; ACnt = 0; }
+  void setCount(unsigned i) { Cnt = i; }
+  void setAutoreleaseCount(unsigned i) { ACnt = i; }
+
+  QualType getType() const { return T; }
+
+  bool isOwned() const {
+    return getKind() == Owned;
+  }
+
+  bool isNotOwned() const {
+    return getKind() == NotOwned;
+  }
+
+  bool isReturnedOwned() const {
+    return getKind() == ReturnedOwned;
+  }
+
+  bool isReturnedNotOwned() const {
+    return getKind() == ReturnedNotOwned;
+  }
+
+  static RefVal makeOwned(RetEffect::ObjKind o, QualType t,
+                          unsigned Count = 1) {
+    return RefVal(Owned, o, Count, 0, t);
+  }
+
+  static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t,
+                             unsigned Count = 0) {
+    return RefVal(NotOwned, o, Count, 0, t);
+  }
+
+  // Comparison, profiling, and pretty-printing.
+
+  bool operator==(const RefVal& X) const {
+    return kind == X.kind && Cnt == X.Cnt && T == X.T && ACnt == X.ACnt;
+  }
+
+  RefVal operator-(size_t i) const {
+    return RefVal(getKind(), getObjKind(), getCount() - i,
+                  getAutoreleaseCount(), getType());
+  }
+
+  RefVal operator+(size_t i) const {
+    return RefVal(getKind(), getObjKind(), getCount() + i,
+                  getAutoreleaseCount(), getType());
+  }
+
+  RefVal operator^(Kind k) const {
+    return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(),
+                  getType());
+  }
+
+  RefVal autorelease() const {
+    return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1,
+                  getType());
+  }
+
+  void Profile(llvm::FoldingSetNodeID& ID) const {
+    ID.AddInteger((unsigned) kind);
+    ID.AddInteger(Cnt);
+    ID.AddInteger(ACnt);
+    ID.Add(T);
+  }
+
+  void print(raw_ostream &Out) const;
+};
+
+void RefVal::print(raw_ostream &Out) const {
+  if (!T.isNull())
+    Out << "Tracked " << T.getAsString() << '/';
+
+  switch (getKind()) {
+    default: llvm_unreachable("Invalid RefVal kind");
+    case Owned: {
+      Out << "Owned";
+      unsigned cnt = getCount();
+      if (cnt) Out << " (+ " << cnt << ")";
+      break;
+    }
+
+    case NotOwned: {
+      Out << "NotOwned";
+      unsigned cnt = getCount();
+      if (cnt) Out << " (+ " << cnt << ")";
+      break;
+    }
+
+    case ReturnedOwned: {
+      Out << "ReturnedOwned";
+      unsigned cnt = getCount();
+      if (cnt) Out << " (+ " << cnt << ")";
+      break;
+    }
+
+    case ReturnedNotOwned: {
+      Out << "ReturnedNotOwned";
+      unsigned cnt = getCount();
+      if (cnt) Out << " (+ " << cnt << ")";
+      break;
+    }
+
+    case Released:
+      Out << "Released";
+      break;
+
+    case ErrorDeallocGC:
+      Out << "-dealloc (GC)";
+      break;
+
+    case ErrorDeallocNotOwned:
+      Out << "-dealloc (not-owned)";
+      break;
+
+    case ErrorLeak:
+      Out << "Leaked";
+      break;
+
+    case ErrorLeakReturned:
+      Out << "Leaked (Bad naming)";
+      break;
+
+    case ErrorGCLeakReturned:
+      Out << "Leaked (GC-ed at return)";
+      break;
+
+    case ErrorUseAfterRelease:
+      Out << "Use-After-Release [ERROR]";
+      break;
+
+    case ErrorReleaseNotOwned:
+      Out << "Release of Not-Owned [ERROR]";
+      break;
+
+    case RefVal::ErrorOverAutorelease:
+      Out << "Over-autoreleased";
+      break;
+
+    case RefVal::ErrorReturnedNotOwned:
+      Out << "Non-owned object returned instead of owned";
+      break;
+  }
+
+  if (ACnt) {
+    Out << " [ARC +" << ACnt << ']';
+  }
+}
+} //end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// RefBindings - State used to track object reference counts.
+//===----------------------------------------------------------------------===//
+
+REGISTER_MAP_WITH_PROGRAMSTATE(RefBindings, SymbolRef, RefVal)
+
+static inline const RefVal *getRefBinding(ProgramStateRef State,
+                                          SymbolRef Sym) {
+  return State->get<RefBindings>(Sym);
+}
+
+static inline ProgramStateRef setRefBinding(ProgramStateRef State,
+                                            SymbolRef Sym, RefVal Val) {
+  return State->set<RefBindings>(Sym, Val);
+}
+
+static ProgramStateRef removeRefBinding(ProgramStateRef State, SymbolRef Sym) {
+  return State->remove<RefBindings>(Sym);
+}
+
+//===----------------------------------------------------------------------===//
+// Function/Method behavior summaries.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RetainSummary {
+  /// Args - a map of (index, ArgEffect) pairs, where index
+  ///  specifies the argument (starting from 0).  This can be sparsely
+  ///  populated; arguments with no entry in Args use 'DefaultArgEffect'.
+  ArgEffects Args;
+
+  /// DefaultArgEffect - The default ArgEffect to apply to arguments that
+  ///  do not have an entry in Args.
+  ArgEffect DefaultArgEffect;
+
+  /// Receiver - If this summary applies to an Objective-C message expression,
+  ///  this is the effect applied to the state of the receiver.
+  ArgEffect Receiver;
+
+  /// Ret - The effect on the return value.  Used to indicate if the
+  ///  function/method call returns a new tracked symbol.
+  RetEffect Ret;
+
+public:
+  RetainSummary(ArgEffects A, RetEffect R, ArgEffect defaultEff,
+                ArgEffect ReceiverEff)
+    : Args(A), DefaultArgEffect(defaultEff), Receiver(ReceiverEff), Ret(R) {}
+
+  /// getArg - Return the argument effect on the argument specified by
+  ///  idx (starting from 0).
+  ArgEffect getArg(unsigned idx) const {
+    if (const ArgEffect *AE = Args.lookup(idx))
+      return *AE;
+
+    return DefaultArgEffect;
+  }
+  
+  void addArg(ArgEffects::Factory &af, unsigned idx, ArgEffect e) {
+    Args = af.add(Args, idx, e);
+  }
+
+  /// setDefaultArgEffect - Set the default argument effect.
+  void setDefaultArgEffect(ArgEffect E) {
+    DefaultArgEffect = E;
+  }
+
+  /// getRetEffect - Returns the effect on the return value of the call.
+  RetEffect getRetEffect() const { return Ret; }
+
+  /// setRetEffect - Set the effect of the return value of the call.
+  void setRetEffect(RetEffect E) { Ret = E; }
+
+  
+  /// Sets the effect on the receiver of the message.
+  void setReceiverEffect(ArgEffect e) { Receiver = e; }
+  
+  /// getReceiverEffect - Returns the effect on the receiver of the call.
+  ///  This is only meaningful if the summary applies to an ObjCMessageExpr*.
+  ArgEffect getReceiverEffect() const { return Receiver; }
+
+  /// Test if two retain summaries are identical. Note that merely equivalent
+  /// summaries are not necessarily identical (for example, if an explicit 
+  /// argument effect matches the default effect).
+  bool operator==(const RetainSummary &Other) const {
+    return Args == Other.Args && DefaultArgEffect == Other.DefaultArgEffect &&
+           Receiver == Other.Receiver && Ret == Other.Ret;
+  }
+
+  /// Profile this summary for inclusion in a FoldingSet.
+  void Profile(llvm::FoldingSetNodeID& ID) const {
+    ID.Add(Args);
+    ID.Add(DefaultArgEffect);
+    ID.Add(Receiver);
+    ID.Add(Ret);
+  }
+
+  /// A retain summary is simple if it has no ArgEffects other than the default.
+  bool isSimple() const {
+    return Args.isEmpty();
+  }
+
+private:
+  ArgEffects getArgEffects() const { return Args; }
+  ArgEffect getDefaultArgEffect() const { return DefaultArgEffect; }
+
+  friend class RetainSummaryManager;
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Data structures for constructing summaries.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCSummaryKey {
+  IdentifierInfo* II;
+  Selector S;
+public:
+  ObjCSummaryKey(IdentifierInfo* ii, Selector s)
+    : II(ii), S(s) {}
+
+  ObjCSummaryKey(const ObjCInterfaceDecl *d, Selector s)
+    : II(d ? d->getIdentifier() : 0), S(s) {}
+
+  ObjCSummaryKey(Selector s)
+    : II(0), S(s) {}
+
+  IdentifierInfo *getIdentifier() const { return II; }
+  Selector getSelector() const { return S; }
+};
+}
+
+namespace llvm {
+template <> struct DenseMapInfo<ObjCSummaryKey> {
+  static inline ObjCSummaryKey getEmptyKey() {
+    return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getEmptyKey(),
+                          DenseMapInfo<Selector>::getEmptyKey());
+  }
+
+  static inline ObjCSummaryKey getTombstoneKey() {
+    return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getTombstoneKey(),
+                          DenseMapInfo<Selector>::getTombstoneKey());
+  }
+
+  static unsigned getHashValue(const ObjCSummaryKey &V) {
+    typedef std::pair<IdentifierInfo*, Selector> PairTy;
+    return DenseMapInfo<PairTy>::getHashValue(PairTy(V.getIdentifier(),
+                                                     V.getSelector()));
+  }
+
+  static bool isEqual(const ObjCSummaryKey& LHS, const ObjCSummaryKey& RHS) {
+    return LHS.getIdentifier() == RHS.getIdentifier() &&
+           LHS.getSelector() == RHS.getSelector();
+  }
+
+};
+template <>
+struct isPodLike<ObjCSummaryKey> { static const bool value = true; };
+} // end llvm namespace
+
+namespace {
+class ObjCSummaryCache {
+  typedef llvm::DenseMap<ObjCSummaryKey, const RetainSummary *> MapTy;
+  MapTy M;
+public:
+  ObjCSummaryCache() {}
+
+  const RetainSummary * find(const ObjCInterfaceDecl *D, Selector S) {
+    // Do a lookup with the (D,S) pair.  If we find a match, return
+    // the summary.
+    ObjCSummaryKey K(D, S);
+    MapTy::iterator I = M.find(K);
+
+    if (I != M.end())
+      return I->second;
+    if (!D)
+      return NULL;
+
+    // Walk the super chain.  If we find a hit with a parent, we'll end
+    // up returning that summary.  We actually allow that key (null,S), as
+    // we cache summaries for the null ObjCInterfaceDecl* to allow us to
+    // generate initial summaries without having to worry about NSObject
+    // being declared.
+    // FIXME: We may change this at some point.
+    for (ObjCInterfaceDecl *C=D->getSuperClass() ;; C=C->getSuperClass()) {
+      if ((I = M.find(ObjCSummaryKey(C, S))) != M.end())
+        break;
+
+      if (!C)
+        return NULL;
+    }
+
+    // Cache the summary with the original key to make the next lookup faster,
+    // and return the summary.
+    const RetainSummary *Summ = I->second;
+    M[K] = Summ;
+    return Summ;
+  }
+
+  const RetainSummary *find(IdentifierInfo* II, Selector S) {
+    // FIXME: Class method lookup.  Right now we don't have a good way
+    // of going between IdentifierInfo* and the class hierarchy.
+    MapTy::iterator I = M.find(ObjCSummaryKey(II, S));
+
+    if (I == M.end())
+      I = M.find(ObjCSummaryKey(S));
+
+    return I == M.end() ? NULL : I->second;
+  }
+
+  const RetainSummary *& operator[](ObjCSummaryKey K) {
+    return M[K];
+  }
+
+  const RetainSummary *& operator[](Selector S) {
+    return M[ ObjCSummaryKey(S) ];
+  }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Data structures for managing collections of summaries.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RetainSummaryManager {
+
+  //==-----------------------------------------------------------------==//
+  //  Typedefs.
+  //==-----------------------------------------------------------------==//
+
+  typedef llvm::DenseMap<const FunctionDecl*, const RetainSummary *>
+          FuncSummariesTy;
+
+  typedef ObjCSummaryCache ObjCMethodSummariesTy;
+
+  typedef llvm::FoldingSetNodeWrapper<RetainSummary> CachedSummaryNode;
+
+  //==-----------------------------------------------------------------==//
+  //  Data.
+  //==-----------------------------------------------------------------==//
+
+  /// Ctx - The ASTContext object for the analyzed ASTs.
+  ASTContext &Ctx;
+
+  /// GCEnabled - Records whether or not the analyzed code runs in GC mode.
+  const bool GCEnabled;
+
+  /// Records whether or not the analyzed code runs in ARC mode.
+  const bool ARCEnabled;
+
+  /// FuncSummaries - A map from FunctionDecls to summaries.
+  FuncSummariesTy FuncSummaries;
+
+  /// ObjCClassMethodSummaries - A map from selectors (for class methods)
+  ///  to summaries.
+  ObjCMethodSummariesTy ObjCClassMethodSummaries;
+
+  /// ObjCMethodSummaries - A map from selectors to summaries.
+  ObjCMethodSummariesTy ObjCMethodSummaries;
+
+  /// BPAlloc - A BumpPtrAllocator used for allocating summaries, ArgEffects,
+  ///  and all other data used by the checker.
+  llvm::BumpPtrAllocator BPAlloc;
+
+  /// AF - A factory for ArgEffects objects.
+  ArgEffects::Factory AF;
+
+  /// ScratchArgs - A holding buffer for constructing ArgEffects.
+  ArgEffects ScratchArgs; 
+
+  /// ObjCAllocRetE - Default return effect for methods returning Objective-C
+  ///  objects.
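+  ///  Under manual retain/release this is an owned return; under GC or ARC
+  ///  the caller does not own the returned object.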
+  RetEffect ObjCAllocRetE;
+
+  /// ObjCInitRetE - Default return effect for init methods returning
+  ///   Objective-C objects.
+  RetEffect ObjCInitRetE;
+
+  /// SimpleSummaries - Used for uniquing summaries that don't have special
+  /// effects.
+  llvm::FoldingSet<CachedSummaryNode> SimpleSummaries;
+
+  //==-----------------------------------------------------------------==//
+  //  Methods.
+  //==-----------------------------------------------------------------==//
+
+  /// getArgEffects - Returns a persistent ArgEffects object based on the
+  ///  data in ScratchArgs.
+  ArgEffects getArgEffects();
+
+  enum UnaryFuncKind { cfretain, cfrelease, cfmakecollectable };
+  
+  const RetainSummary *getUnarySummary(const FunctionType* FT,
+                                       UnaryFuncKind func);
+
+  const RetainSummary *getCFSummaryCreateRule(const FunctionDecl *FD);
+  const RetainSummary *getCFSummaryGetRule(const FunctionDecl *FD);
+  const RetainSummary *getCFCreateGetRuleSummary(const FunctionDecl *FD);
+
+  const RetainSummary *getPersistentSummary(const RetainSummary &OldSumm);
+
+  const RetainSummary *getPersistentSummary(RetEffect RetEff,
+                                            ArgEffect ReceiverEff = DoNothing,
+                                            ArgEffect DefaultEff = MayEscape) {
+    RetainSummary Summ(getArgEffects(), RetEff, DefaultEff, ReceiverEff);
+    return getPersistentSummary(Summ);
+  }
+
+  const RetainSummary *getDoNothingSummary() {
+    return getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+  }
+  
+  const RetainSummary *getDefaultSummary() {
+    return getPersistentSummary(RetEffect::MakeNoRet(),
+                                DoNothing, MayEscape);
+  }
+
+  const RetainSummary *getPersistentStopSummary() {
+    return getPersistentSummary(RetEffect::MakeNoRet(),
+                                StopTracking, StopTracking);
+  }
+
+  void InitializeClassMethodSummaries();
+  void InitializeMethodSummaries();
+private:
+  void addNSObjectClsMethSummary(Selector S, const RetainSummary *Summ) {
+    ObjCClassMethodSummaries[S] = Summ;
+  }
+
+  void addNSObjectMethSummary(Selector S, const RetainSummary *Summ) {
+    ObjCMethodSummaries[S] = Summ;
+  }
+
+  void addClassMethSummary(const char* Cls, const char* name,
+                           const RetainSummary *Summ, bool isNullary = true) {
+    IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
+    Selector S = isNullary ? GetNullarySelector(name, Ctx) 
+                           : GetUnarySelector(name, Ctx);
+    ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)]  = Summ;
+  }
+
+  void addInstMethSummary(const char* Cls, const char* nullaryName,
+                          const RetainSummary *Summ) {
+    IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
+    Selector S = GetNullarySelector(nullaryName, Ctx);
+    ObjCMethodSummaries[ObjCSummaryKey(ClsII, S)]  = Summ;
+  }
+
+  Selector generateSelector(va_list argp) {
+    SmallVector<IdentifierInfo*, 10> II;
+
+    while (const char* s = va_arg(argp, const char*))
+      II.push_back(&Ctx.Idents.get(s));
+
+    return Ctx.Selectors.getSelector(II.size(), &II[0]);
+  }
+
+  void addMethodSummary(IdentifierInfo *ClsII, ObjCMethodSummariesTy& Summaries,
+                        const RetainSummary * Summ, va_list argp) {
+    Selector S = generateSelector(argp);
+    Summaries[ObjCSummaryKey(ClsII, S)] = Summ;
+  }
+
+  void addInstMethSummary(const char* Cls, const RetainSummary * Summ, ...) {
+    va_list argp;
+    va_start(argp, Summ);
+    addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, argp);
+    va_end(argp);
+  }
+
+  void addClsMethSummary(const char* Cls, const RetainSummary * Summ, ...) {
+    va_list argp;
+    va_start(argp, Summ);
+    addMethodSummary(&Ctx.Idents.get(Cls),ObjCClassMethodSummaries, Summ, argp);
+    va_end(argp);
+  }
+
+  void addClsMethSummary(IdentifierInfo *II, const RetainSummary * Summ, ...) {
+    va_list argp;
+    va_start(argp, Summ);
+    addMethodSummary(II, ObjCClassMethodSummaries, Summ, argp);
+    va_end(argp);
+  }
+
+public:
+
+  RetainSummaryManager(ASTContext &ctx, bool gcenabled, bool usesARC)
+   : Ctx(ctx),
+     GCEnabled(gcenabled),
+     ARCEnabled(usesARC),
+     AF(BPAlloc), ScratchArgs(AF.getEmptyMap()),
+     ObjCAllocRetE(gcenabled
+                    ? RetEffect::MakeGCNotOwned()
+                    : (usesARC ? RetEffect::MakeARCNotOwned()
+                               : RetEffect::MakeOwned(RetEffect::ObjC, true))),
+     ObjCInitRetE(gcenabled 
+                    ? RetEffect::MakeGCNotOwned()
+                    : (usesARC ? RetEffect::MakeARCNotOwned()
+                               : RetEffect::MakeOwnedWhenTrackedReceiver())) {
+    InitializeClassMethodSummaries();
+    InitializeMethodSummaries();
+  }
+
+  const RetainSummary *getSummary(const CallEvent &Call,
+                                  ProgramStateRef State = 0);
+
+  const RetainSummary *getFunctionSummary(const FunctionDecl *FD);
+
+  const RetainSummary *getMethodSummary(Selector S, const ObjCInterfaceDecl *ID,
+                                        const ObjCMethodDecl *MD,
+                                        QualType RetTy,
+                                        ObjCMethodSummariesTy &CachedSummaries);
+
+  const RetainSummary *getInstanceMethodSummary(const ObjCMethodCall &M,
+                                                ProgramStateRef State);
+
+  const RetainSummary *getClassMethodSummary(const ObjCMethodCall &M) {
+    assert(!M.isInstanceMessage());
+    const ObjCInterfaceDecl *Class = M.getReceiverInterface();
+
+    return getMethodSummary(M.getSelector(), Class, M.getDecl(),
+                            M.getResultType(), ObjCClassMethodSummaries);
+  }
+
+  /// getMethodSummary - This version of getMethodSummary is used to query
+  ///  the summary for the current method being analyzed.
+  const RetainSummary *getMethodSummary(const ObjCMethodDecl *MD) {
+    const ObjCInterfaceDecl *ID = MD->getClassInterface();
+    Selector S = MD->getSelector();
+    QualType ResultTy = MD->getResultType();
+
+    ObjCMethodSummariesTy *CachedSummaries;
+    if (MD->isInstanceMethod())
+      CachedSummaries = &ObjCMethodSummaries;
+    else
+      CachedSummaries = &ObjCClassMethodSummaries;
+
+    return getMethodSummary(S, ID, MD, ResultTy, *CachedSummaries);
+  }
+
+  const RetainSummary *getStandardMethodSummary(const ObjCMethodDecl *MD,
+                                                Selector S, QualType RetTy);
+
+  /// Determine if there is a special return effect for this function or method.
+  Optional<RetEffect> getRetEffectFromAnnotations(QualType RetTy,
+                                                  const Decl *D);
+
+  void updateSummaryFromAnnotations(const RetainSummary *&Summ,
+                                    const ObjCMethodDecl *MD);
+
+  void updateSummaryFromAnnotations(const RetainSummary *&Summ,
+                                    const FunctionDecl *FD);
+
+  void updateSummaryForCall(const RetainSummary *&Summ,
+                            const CallEvent &Call);
+
+  bool isGCEnabled() const { return GCEnabled; }
+
+  bool isARCEnabled() const { return ARCEnabled; }
+  
+  bool isARCorGCEnabled() const { return GCEnabled || ARCEnabled; }
+
+  RetEffect getObjAllocRetEffect() const { return ObjCAllocRetE; }
+
+  friend class RetainSummaryTemplate;
+};
+
+// Used to avoid allocating long-term (BPAlloc'd) memory for default retain
+// summaries. If a function or method looks like it has a default summary, but
+// it has annotations, the annotations are added to the stack-based template
+// and then copied into managed memory.
+class RetainSummaryTemplate {
+  RetainSummaryManager &Manager;
+  const RetainSummary *&RealSummary;
+  RetainSummary ScratchSummary;
+  bool Accessed;
+public:
+  RetainSummaryTemplate(const RetainSummary *&real, RetainSummaryManager &mgr)
+    : Manager(mgr), RealSummary(real), ScratchSummary(*real), Accessed(false) {}
+
+  ~RetainSummaryTemplate() {
+    if (Accessed)
+      RealSummary = Manager.getPersistentSummary(ScratchSummary);
+  }
+
+  RetainSummary &operator*() {
+    Accessed = true;
+    return ScratchSummary;
+  }
+
+  RetainSummary *operator->() {
+    Accessed = true;
+    return &ScratchSummary;
+  }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Implementation of checker data structures.
+//===----------------------------------------------------------------------===//
+
+ArgEffects RetainSummaryManager::getArgEffects() {
+  ArgEffects AE = ScratchArgs;
+  ScratchArgs = AF.getEmptyMap();
+  return AE;
+}
+
+const RetainSummary *
+RetainSummaryManager::getPersistentSummary(const RetainSummary &OldSumm) {
+  // Unique "simple" summaries -- those without ArgEffects.
+  if (OldSumm.isSimple()) {
+    llvm::FoldingSetNodeID ID;
+    OldSumm.Profile(ID);
+
+    void *Pos;
+    CachedSummaryNode *N = SimpleSummaries.FindNodeOrInsertPos(ID, Pos);
+
+    if (!N) {
+      N = (CachedSummaryNode *) BPAlloc.Allocate<CachedSummaryNode>();
+      new (N) CachedSummaryNode(OldSumm);
+      SimpleSummaries.InsertNode(N, Pos);
+    }
+
+    return &N->getValue();
+  }
+
+  RetainSummary *Summ = (RetainSummary *) BPAlloc.Allocate<RetainSummary>();
+  new (Summ) RetainSummary(OldSumm);
+  return Summ;
+}
+
+//===----------------------------------------------------------------------===//
+// Summary creation for functions (largely uses of Core Foundation).
+//===----------------------------------------------------------------------===//
+
+static bool isRetain(const FunctionDecl *FD, StringRef FName) {
+  return FName.endswith("Retain");
+}
+
+static bool isRelease(const FunctionDecl *FD, StringRef FName) {
+  return FName.endswith("Release");
+}
+
+static bool isMakeCollectable(const FunctionDecl *FD, StringRef FName) {
+  // FIXME: Remove FunctionDecl parameter.
+  // FIXME: Is it really okay if MakeCollectable isn't a suffix?
+  return FName.find("MakeCollectable") != StringRef::npos;
+}
+
+static ArgEffect getStopTrackingHardEquivalent(ArgEffect E) {
+  switch (E) {
+  case DoNothing:
+  case Autorelease:
+  case DecRefBridgedTransfered:
+  case IncRef:
+  case IncRefMsg:
+  case MakeCollectable:
+  case MayEscape:
+  case StopTracking:
+  case StopTrackingHard:
+    return StopTrackingHard;
+  case DecRef:
+  case DecRefAndStopTrackingHard:
+    return DecRefAndStopTrackingHard;
+  case DecRefMsg:
+  case DecRefMsgAndStopTrackingHard:
+    return DecRefMsgAndStopTrackingHard;
+  case Dealloc:
+    return Dealloc;
+  }
+
+  llvm_unreachable("Unknown ArgEffect kind");
+}
+
+void RetainSummaryManager::updateSummaryForCall(const RetainSummary *&S,
+                                                const CallEvent &Call) {
+  if (Call.hasNonZeroCallbackArg()) {
+    ArgEffect RecEffect =
+      getStopTrackingHardEquivalent(S->getReceiverEffect());
+    ArgEffect DefEffect =
+      getStopTrackingHardEquivalent(S->getDefaultArgEffect());
+
+    ArgEffects CustomArgEffects = S->getArgEffects();
+    for (ArgEffects::iterator I = CustomArgEffects.begin(),
+                              E = CustomArgEffects.end();
+         I != E; ++I) {
+      ArgEffect Translated = getStopTrackingHardEquivalent(I->second);
+      if (Translated != DefEffect)
+        ScratchArgs = AF.add(ScratchArgs, I->first, Translated);
+    }
+
+    RetEffect RE = RetEffect::MakeNoRetHard();
+
+    // Special cases where the callback argument CANNOT free the return value.
+    // This can generally only happen if we know that the callback will only be
+    // called when the return value is already being deallocated.
+    if (const FunctionCall *FC = dyn_cast<FunctionCall>(&Call)) {
+      if (IdentifierInfo *Name = FC->getDecl()->getIdentifier()) {
+        // When the CGBitmapContext is deallocated, the callback here will free
+        // the associated data buffer.
+        if (Name->isStr("CGBitmapContextCreateWithData"))
+          RE = S->getRetEffect();
+      }
+    }
+
+    S = getPersistentSummary(RE, RecEffect, DefEffect);
+  }
+
+  // Special case '[super init];' and '[self init];'
+  //
+  // Even though calling '[super init]' without assigning the result to self
+  // and checking if the parent returns 'nil' is a bad pattern, it is common.
+  // Additionally, our Self Init checker already warns about it. To avoid
+  // overwhelming the user with messages from both checkers, we model the case
+  // of '[super init]' in cases when it is not consumed by another expression
+  // as if the call preserves the value of 'self'; essentially, assuming it can 
+  // never fail and return 'nil'.
+  // Note, we don't want to just stop tracking the value since we want the
+  // RetainCount checker to report leaks and use-after-free if the SelfInit
+  // checker is turned off.
+  if (const ObjCMethodCall *MC = dyn_cast<ObjCMethodCall>(&Call)) {
+    if (MC->getMethodFamily() == OMF_init && MC->isReceiverSelfOrSuper()) {
+
+      // If the message is not consumed, we know it will not be used in
+      // an assignment, e.g. "self = [super init]".
+      const Expr *ME = MC->getOriginExpr();
+      const LocationContext *LCtx = MC->getLocationContext();
+      ParentMap &PM = LCtx->getAnalysisDeclContext()->getParentMap();
+      if (!PM.isConsumedExpr(ME)) {
+        RetainSummaryTemplate ModifiableSummaryTemplate(S, *this);
+        ModifiableSummaryTemplate->setReceiverEffect(DoNothing);
+        ModifiableSummaryTemplate->setRetEffect(RetEffect::MakeNoRet());
+      }
+    }
+
+  }
+}
+
+const RetainSummary *
+RetainSummaryManager::getSummary(const CallEvent &Call,
+                                 ProgramStateRef State) {
+  const RetainSummary *Summ;
+  switch (Call.getKind()) {
+  case CE_Function:
+    Summ = getFunctionSummary(cast<FunctionCall>(Call).getDecl());
+    break;
+  case CE_CXXMember:
+  case CE_CXXMemberOperator:
+  case CE_Block:
+  case CE_CXXConstructor:
+  case CE_CXXDestructor:
+  case CE_CXXAllocator:
+    // FIXME: These calls are currently unsupported.
+    return getPersistentStopSummary();
+  case CE_ObjCMessage: {
+    const ObjCMethodCall &Msg = cast<ObjCMethodCall>(Call);
+    if (Msg.isInstanceMessage())
+      Summ = getInstanceMethodSummary(Msg, State);
+    else
+      Summ = getClassMethodSummary(Msg);
+    break;
+  }
+  }
+
+  updateSummaryForCall(Summ, Call);
+
+  assert(Summ && "Unknown call type?");
+  return Summ;
+}
+
+const RetainSummary *
+RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) {
+  // If we don't know what function we're calling, use our default summary.
+  if (!FD)
+    return getDefaultSummary();
+
+  // Look up a summary in our cache of FunctionDecls -> Summaries.
+  FuncSummariesTy::iterator I = FuncSummaries.find(FD);
+  if (I != FuncSummaries.end())
+    return I->second;
+
+  // No summary?  Generate one.
+  const RetainSummary *S = 0;
+  bool AllowAnnotations = true;
+
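+  // The do/while(0) below lets each recognized case 'break' out as soon as a
+  // summary has been chosen.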
+  do {
+    // We generate "stop" summaries for implicitly defined functions.
+    if (FD->isImplicit()) {
+      S = getPersistentStopSummary();
+      break;
+    }
+
+    // [PR 3337] Use 'getAs<FunctionType>' to strip away any typedefs on the
+    // function's type.
+    const FunctionType* FT = FD->getType()->getAs<FunctionType>();
+    const IdentifierInfo *II = FD->getIdentifier();
+    if (!II)
+      break;
+
+    StringRef FName = II->getName();
+
+    // Strip away preceding '_'.  Doing this here will affect all the checks
+    // down below.
+    FName = FName.substr(FName.find_first_not_of('_'));
+
+    // Inspect the result type.
+    QualType RetTy = FT->getResultType();
+
+    // FIXME: This should all be refactored into a chain of "summary lookup"
+    //  filters.
+    assert(ScratchArgs.isEmpty());
+
+    if (FName == "pthread_create" || FName == "pthread_setspecific") {
+      // Part of: <rdar://problem/7299394> and <rdar://problem/11282706>.
+      // This will be addressed better with IPA.
+      S = getPersistentStopSummary();
+    } else if (FName == "NSMakeCollectable") {
+      // Handle: id NSMakeCollectable(CFTypeRef)
+      S = (RetTy->isObjCIdType())
+          ? getUnarySummary(FT, cfmakecollectable)
+          : getPersistentStopSummary();
+      // The headers on OS X 10.8 use cf_consumed/ns_returns_retained,
+      // but we can fully model NSMakeCollectable ourselves.
+      AllowAnnotations = false;
+    } else if (FName == "CFPlugInInstanceCreate") {
+      S = getPersistentSummary(RetEffect::MakeNoRet());
+    } else if (FName == "IOBSDNameMatching" ||
+               FName == "IOServiceMatching" ||
+               FName == "IOServiceNameMatching" ||
+               FName == "IORegistryEntrySearchCFProperty" ||
+               FName == "IORegistryEntryIDMatching" ||
+               FName == "IOOpenFirmwarePathMatching") {
+      // Part of <rdar://problem/6961230>. (IOKit)
+      // This should be addressed using an API table.
+      S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
+                               DoNothing, DoNothing);
+    } else if (FName == "IOServiceGetMatchingService" ||
+               FName == "IOServiceGetMatchingServices") {
+      // FIXES: <rdar://problem/6326900>
+      // This should be addressed using an API table.  These string comparisons
+      // are also a little gross, but there is no need to over-optimize here.
+      ScratchArgs = AF.add(ScratchArgs, 1, DecRef);
+      S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+    } else if (FName == "IOServiceAddNotification" ||
+               FName == "IOServiceAddMatchingNotification") {
+      // Part of <rdar://problem/6961230>. (IOKit)
+      // This should be addressed using an API table.
+      ScratchArgs = AF.add(ScratchArgs, 2, DecRef);
+      S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+    } else if (FName == "CVPixelBufferCreateWithBytes") {
+      // FIXES: <rdar://problem/7283567>
+      // Eventually this can be improved by recognizing that the pixel
+      // buffer passed to CVPixelBufferCreateWithBytes is released via
+      // a callback and doing full IPA to make sure this is done correctly.
+      // FIXME: This function has an out parameter that returns an
+      // allocated object.
+      ScratchArgs = AF.add(ScratchArgs, 7, StopTracking);
+      S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+    } else if (FName == "CGBitmapContextCreateWithData") {
+      // FIXES: <rdar://problem/7358899>
+      // Eventually this can be improved by recognizing that 'releaseInfo'
+      // passed to CGBitmapContextCreateWithData is released via
+      // a callback and doing full IPA to make sure this is done correctly.
+      ScratchArgs = AF.add(ScratchArgs, 8, StopTracking);
+      S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
+                               DoNothing, DoNothing);
+    } else if (FName == "CVPixelBufferCreateWithPlanarBytes") {
+      // FIXES: <rdar://problem/7283567>
+      // Eventually this can be improved by recognizing that the pixel
+      // buffer passed to CVPixelBufferCreateWithPlanarBytes is released
+      // via a callback and doing full IPA to make sure this is done
+      // correctly.
+      ScratchArgs = AF.add(ScratchArgs, 12, StopTracking);
+      S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+    } else if (FName == "dispatch_set_context" ||
+               FName == "xpc_connection_set_context") {
+      // <rdar://problem/11059275> - The analyzer currently doesn't have
+      // a good way to reason about the finalizer function for libdispatch.
+      // If we pass a context object that is memory managed, stop tracking it.
+      // <rdar://problem/13783514> - Same problem, but for XPC.
+      // FIXME: this hack should possibly go away once we can handle
+      // libdispatch and XPC finalizers.
+      ScratchArgs = AF.add(ScratchArgs, 1, StopTracking);
+      S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+    } else if (FName.startswith("NSLog")) {
+      S = getDoNothingSummary();
+    } else if (FName.startswith("NS") &&
+                (FName.find("Insert") != StringRef::npos)) {
+      // Whitelist NSXXInsertXX, for example NSMapInsertIfAbsent, since the
+      // values they insert can be deallocated by NSMapRemove. (radar://11152419)
+      ScratchArgs = AF.add(ScratchArgs, 1, StopTracking);
+      ScratchArgs = AF.add(ScratchArgs, 2, StopTracking);
+      S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+    }
+
+    // Did we get a summary?
+    if (S)
+      break;
+
+    if (RetTy->isPointerType()) {      
+      // For CoreFoundation ('CF') types.
+      if (cocoa::isRefType(RetTy, "CF", FName)) {
+        if (isRetain(FD, FName))
+          S = getUnarySummary(FT, cfretain);
+        else if (isMakeCollectable(FD, FName))
+          S = getUnarySummary(FT, cfmakecollectable);
+        else
+          S = getCFCreateGetRuleSummary(FD);
+
+        break;
+      }
+
+      // For CoreGraphics ('CG') types.
+      if (cocoa::isRefType(RetTy, "CG", FName)) {
+        if (isRetain(FD, FName))
+          S = getUnarySummary(FT, cfretain);
+        else
+          S = getCFCreateGetRuleSummary(FD);
+
+        break;
+      }
+
+      // For the Disk Arbitration API (DiskArbitration/DADisk.h)
+      if (cocoa::isRefType(RetTy, "DADisk") ||
+          cocoa::isRefType(RetTy, "DADissenter") ||
+          cocoa::isRefType(RetTy, "DASessionRef")) {
+        S = getCFCreateGetRuleSummary(FD);
+        break;
+      }
+
+      if (FD->getAttr<CFAuditedTransferAttr>()) {
+        S = getCFCreateGetRuleSummary(FD);
+        break;
+      }
+
+      break;
+    }
+
+    // Check for release functions, the only kind of functions that we care
+    // about that don't return a pointer type.
+    if (FName[0] == 'C' && (FName[1] == 'F' || FName[1] == 'G')) {
+      // Test for 'CGCF'.
+      FName = FName.substr(FName.startswith("CGCF") ? 4 : 2);
+
+      if (isRelease(FD, FName))
+        S = getUnarySummary(FT, cfrelease);
+      else {
+        assert (ScratchArgs.isEmpty());
+        // Remaining CoreFoundation and CoreGraphics functions.
+        // We used to assume that they all strictly followed the ownership idiom
+        // and that ownership cannot be transferred.  While this is technically
+        // correct, many methods allow a tracked object to escape.  For example:
+        //
+        //   CFMutableDictionaryRef x = CFDictionaryCreateMutable(...);
+        //   CFDictionaryAddValue(y, key, x);
+        //   CFRelease(x);
+        //   ... it is okay to use 'x' since 'y' has a reference to it
+        //
+        // We handle this and similar cases with the following heuristic.  If the
+        // function name contains "InsertValue", "SetValue", "AddValue",
+        // "AppendValue", or "SetAttribute", then we assume that arguments may
+        // "escape."  This means that something else holds on to the object,
+        // allowing it to be used even after its local retain count drops to 0.
+        ArgEffect E = (StrInStrNoCase(FName, "InsertValue") != StringRef::npos||
+                       StrInStrNoCase(FName, "AddValue") != StringRef::npos ||
+                       StrInStrNoCase(FName, "SetValue") != StringRef::npos ||
+                       StrInStrNoCase(FName, "AppendValue") != StringRef::npos||
+                       StrInStrNoCase(FName, "SetAttribute") != StringRef::npos)
+                      ? MayEscape : DoNothing;
+
+        S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, E);
+      }
+    }
+  } while (0);
+
+  // If we got all the way here without any luck, use a default summary.
+  if (!S)
+    S = getDefaultSummary();
+
+  // Annotations override defaults.
+  if (AllowAnnotations)
+    updateSummaryFromAnnotations(S, FD);
+
+  FuncSummaries[FD] = S;
+  return S;
+}
+
+const RetainSummary *
+RetainSummaryManager::getCFCreateGetRuleSummary(const FunctionDecl *FD) {
+  if (coreFoundation::followsCreateRule(FD))
+    return getCFSummaryCreateRule(FD);
+
+  return getCFSummaryGetRule(FD);
+}
+
+const RetainSummary *
+RetainSummaryManager::getUnarySummary(const FunctionType* FT,
+                                      UnaryFuncKind func) {
+
+  // Sanity check that this is *really* a unary function.  It might not be
+  // if people do weird things.
+  const FunctionProtoType* FTP = dyn_cast<FunctionProtoType>(FT);
+  if (!FTP || FTP->getNumArgs() != 1)
+    return getPersistentStopSummary();
+
+  assert (ScratchArgs.isEmpty());
+
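+  // Map the function kind to the effect applied to its single argument.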
+  ArgEffect Effect;
+  switch (func) {
+    case cfretain: Effect = IncRef; break;
+    case cfrelease: Effect = DecRef; break;
+    case cfmakecollectable: Effect = MakeCollectable; break;
+  }
+
+  ScratchArgs = AF.add(ScratchArgs, 0, Effect);
+  return getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+}
+
+const RetainSummary * 
+RetainSummaryManager::getCFSummaryCreateRule(const FunctionDecl *FD) {
+  assert (ScratchArgs.isEmpty());
+
+  return getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
+}
+
+const RetainSummary * 
+RetainSummaryManager::getCFSummaryGetRule(const FunctionDecl *FD) {
+  assert (ScratchArgs.isEmpty());
+  return getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::CF),
+                              DoNothing, DoNothing);
+}
+
+//===----------------------------------------------------------------------===//
+// Summary creation for Selectors.
+//===----------------------------------------------------------------------===//
+
+Optional<RetEffect>
+RetainSummaryManager::getRetEffectFromAnnotations(QualType RetTy,
+                                                  const Decl *D) {
+  if (cocoa::isCocoaObjectRef(RetTy)) {
+    if (D->getAttr<NSReturnsRetainedAttr>())
+      return ObjCAllocRetE;
+
+    if (D->getAttr<NSReturnsNotRetainedAttr>() ||
+        D->getAttr<NSReturnsAutoreleasedAttr>())
+      return RetEffect::MakeNotOwned(RetEffect::ObjC);
+
+  } else if (!RetTy->isPointerType()) {
+    return None;
+  }
+
+  if (D->getAttr<CFReturnsRetainedAttr>())
+    return RetEffect::MakeOwned(RetEffect::CF, true);
+
+  if (D->getAttr<CFReturnsNotRetainedAttr>())
+    return RetEffect::MakeNotOwned(RetEffect::CF);
+
+  return None;
+}
+
+void
+RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
+                                                   const FunctionDecl *FD) {
+  if (!FD)
+    return;
+
+  assert(Summ && "Must have a summary to add annotations to.");
+  RetainSummaryTemplate Template(Summ, *this);
+
+  // Effects on the parameters.
+  unsigned parm_idx = 0;
+  for (FunctionDecl::param_const_iterator pi = FD->param_begin(), 
+         pe = FD->param_end(); pi != pe; ++pi, ++parm_idx) {
+    const ParmVarDecl *pd = *pi;
+    if (pd->getAttr<NSConsumedAttr>())
+      Template->addArg(AF, parm_idx, DecRefMsg);
+    else if (pd->getAttr<CFConsumedAttr>())
+      Template->addArg(AF, parm_idx, DecRef);      
+  }
+  
+  QualType RetTy = FD->getResultType();
+  if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, FD))
+    Template->setRetEffect(*RetE);
+}
+
+void
+RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
+                                                   const ObjCMethodDecl *MD) {
+  if (!MD)
+    return;
+
+  assert(Summ && "Must have a valid summary to add annotations to");
+  RetainSummaryTemplate Template(Summ, *this);
+
+  // Effects on the receiver.
+  if (MD->getAttr<NSConsumesSelfAttr>())
+    Template->setReceiverEffect(DecRefMsg);      
+  
+  // Effects on the parameters.
+  unsigned parm_idx = 0;
+  for (ObjCMethodDecl::param_const_iterator
+         pi=MD->param_begin(), pe=MD->param_end();
+       pi != pe; ++pi, ++parm_idx) {
+    const ParmVarDecl *pd = *pi;
+    if (pd->getAttr<NSConsumedAttr>())
+      Template->addArg(AF, parm_idx, DecRefMsg);      
+    else if (pd->getAttr<CFConsumedAttr>()) {
+      Template->addArg(AF, parm_idx, DecRef);      
+    }   
+  }
+  
+  QualType RetTy = MD->getResultType();
+  if (Optional<RetEffect> RetE = getRetEffectFromAnnotations(RetTy, MD))
+    Template->setRetEffect(*RetE);
+}
+
+const RetainSummary *
+RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
+                                               Selector S, QualType RetTy) {
+  // Any special effects?
+  ArgEffect ReceiverEff = DoNothing;
+  RetEffect ResultEff = RetEffect::MakeNoRet();
+
+  // Check the method family, and apply any default annotations.
+  switch (MD ? MD->getMethodFamily() : S.getMethodFamily()) {
+    case OMF_None:
+    case OMF_performSelector:
+      // Assume all Objective-C methods follow Cocoa Memory Management rules.
+      // FIXME: Does the non-threaded performSelector family really belong here?
+      // The selector could be, say, @selector(copy).
+      if (cocoa::isCocoaObjectRef(RetTy))
+        ResultEff = RetEffect::MakeNotOwned(RetEffect::ObjC);
+      else if (coreFoundation::isCFObjectRef(RetTy)) {
+        // ObjCMethodDecl currently doesn't consider CF objects as valid return 
+        // values for alloc, new, copy, or mutableCopy, so we have to
+        // double-check with the selector. This is ugly, but there aren't that
+        // many Objective-C methods that return CF objects, right?
+        if (MD) {
+          switch (S.getMethodFamily()) {
+          case OMF_alloc:
+          case OMF_new:
+          case OMF_copy:
+          case OMF_mutableCopy:
+            ResultEff = RetEffect::MakeOwned(RetEffect::CF, true);
+            break;
+          default:
+            ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);        
+            break;
+          }
+        } else {
+          ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);        
+        }
+      }
+      break;
+    case OMF_init:
+      ResultEff = ObjCInitRetE;
+      ReceiverEff = DecRefMsg;
+      break;
+    case OMF_alloc:
+    case OMF_new:
+    case OMF_copy:
+    case OMF_mutableCopy:
+      if (cocoa::isCocoaObjectRef(RetTy))
+        ResultEff = ObjCAllocRetE;
+      else if (coreFoundation::isCFObjectRef(RetTy))
+        ResultEff = RetEffect::MakeOwned(RetEffect::CF, true);
+      break;
+    case OMF_autorelease:
+      ReceiverEff = Autorelease;
+      break;
+    case OMF_retain:
+      ReceiverEff = IncRefMsg;
+      break;
+    case OMF_release:
+      ReceiverEff = DecRefMsg;
+      break;
+    case OMF_dealloc:
+      ReceiverEff = Dealloc;
+      break;
+    case OMF_self:
+      // -self is handled specially by the ExprEngine to propagate the receiver.
+      break;
+    case OMF_retainCount:
+    case OMF_finalize:
+      // These methods don't return objects.
+      break;
+  }
+
+  // If one of the arguments in the selector has the keyword 'delegate' we
+  // should stop tracking the reference count for the receiver.  This is
+  // because the reference count is quite possibly handled by a delegate
+  // method.
+  if (S.isKeywordSelector()) {
+    for (unsigned i = 0, e = S.getNumArgs(); i != e; ++i) {
+      StringRef Slot = S.getNameForSlot(i);
+      if (Slot.substr(Slot.size() - 8).equals_lower("delegate")) {
+        if (ResultEff == ObjCInitRetE)
+          ResultEff = RetEffect::MakeNoRetHard();
+        else
+          ReceiverEff = StopTrackingHard;
+      }
+    }
+  }
+
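+  // If nothing interesting was deduced, fall back to the default summary.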
+  if (ScratchArgs.isEmpty() && ReceiverEff == DoNothing &&
+      ResultEff.getKind() == RetEffect::NoRet)
+    return getDefaultSummary();
+
+  return getPersistentSummary(ResultEff, ReceiverEff, MayEscape);
+}
+
+const RetainSummary *
+RetainSummaryManager::getInstanceMethodSummary(const ObjCMethodCall &Msg,
+                                               ProgramStateRef State) {
+  const ObjCInterfaceDecl *ReceiverClass = 0;
+
+  // We do better tracking of the type of the object than the core ExprEngine.
+  // See if we have its type in our private state.
+  // FIXME: Eventually replace the use of state->get<RefBindings> with
+  // a generic API for reasoning about the Objective-C types of symbolic
+  // objects.
+  SVal ReceiverV = Msg.getReceiverSVal();
+  if (SymbolRef Sym = ReceiverV.getAsLocSymbol())
+    if (const RefVal *T = getRefBinding(State, Sym))
+      if (const ObjCObjectPointerType *PT =
+            T->getType()->getAs<ObjCObjectPointerType>())
+        ReceiverClass = PT->getInterfaceDecl();
+
+  // If we don't know what kind of object this is, fall back to its static type.
+  if (!ReceiverClass)
+    ReceiverClass = Msg.getReceiverInterface();
+
+  // FIXME: The receiver could be a reference to a class, meaning that
+  //  we should use the class method.
+  // id x = [NSObject class];
+  // [x performSelector:... withObject:... afterDelay:...];
+  Selector S = Msg.getSelector();
+  const ObjCMethodDecl *Method = Msg.getDecl();
+  if (!Method && ReceiverClass)
+    Method = ReceiverClass->getInstanceMethod(S);
+
+  return getMethodSummary(S, ReceiverClass, Method, Msg.getResultType(),
+                          ObjCMethodSummaries);
+}
+
+const RetainSummary *
+RetainSummaryManager::getMethodSummary(Selector S, const ObjCInterfaceDecl *ID,
+                                       const ObjCMethodDecl *MD, QualType RetTy,
+                                       ObjCMethodSummariesTy &CachedSummaries) {
+
+  // Look up a summary in our summary cache.
+  const RetainSummary *Summ = CachedSummaries.find(ID, S);
+
+  if (!Summ) {
+    Summ = getStandardMethodSummary(MD, S, RetTy);
+
+    // Annotations override defaults.
+    updateSummaryFromAnnotations(Summ, MD);
+
+    // Memoize the summary.
+    CachedSummaries[ObjCSummaryKey(ID, S)] = Summ;
+  }
+
+  return Summ;
+}
+
+void RetainSummaryManager::InitializeClassMethodSummaries() {
+  assert(ScratchArgs.isEmpty());
+  // Create the [NSAssertionHandler currentHandler] summary.
+  addClassMethSummary("NSAssertionHandler", "currentHandler",
+                getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC)));
+
+  // Create the [NSAutoreleasePool addObject:] summary.
+  ScratchArgs = AF.add(ScratchArgs, 0, Autorelease);
+  addClassMethSummary("NSAutoreleasePool", "addObject",
+                      getPersistentSummary(RetEffect::MakeNoRet(),
+                                           DoNothing, Autorelease));
+}
+
+void RetainSummaryManager::InitializeMethodSummaries() {
+
+  assert (ScratchArgs.isEmpty());
+
+  // Create the "init" selector.  It just acts as a pass-through for the
+  // receiver.
+  const RetainSummary *InitSumm = getPersistentSummary(ObjCInitRetE, DecRefMsg);
+  addNSObjectMethSummary(GetNullarySelector("init", Ctx), InitSumm);
+
+  // awakeAfterUsingCoder: behaves basically like an 'init' method.  It
+  // claims the receiver and returns a retained object.
+  addNSObjectMethSummary(GetUnarySelector("awakeAfterUsingCoder", Ctx),
+                         InitSumm);
+
+  // The next methods are allocators.
+  const RetainSummary *AllocSumm = getPersistentSummary(ObjCAllocRetE);
+  const RetainSummary *CFAllocSumm =
+    getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
+
+  // Create the "retain" selector.
+  RetEffect NoRet = RetEffect::MakeNoRet();
+  const RetainSummary *Summ = getPersistentSummary(NoRet, IncRefMsg);
+  addNSObjectMethSummary(GetNullarySelector("retain", Ctx), Summ);
+
+  // Create the "release" selector.
+  Summ = getPersistentSummary(NoRet, DecRefMsg);
+  addNSObjectMethSummary(GetNullarySelector("release", Ctx), Summ);
+
+  // Create the -dealloc summary.
+  Summ = getPersistentSummary(NoRet, Dealloc);
+  addNSObjectMethSummary(GetNullarySelector("dealloc", Ctx), Summ);
+
+  // Create the "autorelease" selector.
+  Summ = getPersistentSummary(NoRet, Autorelease);
+  addNSObjectMethSummary(GetNullarySelector("autorelease", Ctx), Summ);
+
+  // For NSWindow, allocated objects are (initially) self-owned.
+  // FIXME: For now we opt for false negatives with NSWindow, as these objects
+  //  own themselves.  However, they only do this once they are displayed.
+  //  Thus, we need to track an NSWindow's display status.
+  //  This is tracked in <rdar://problem/6062711>.
+  //  See also http://llvm.org/bugs/show_bug.cgi?id=3714.
+  const RetainSummary *NoTrackYet = getPersistentSummary(RetEffect::MakeNoRet(),
+                                                   StopTracking,
+                                                   StopTracking);
+
+  addClassMethSummary("NSWindow", "alloc", NoTrackYet);
+
+  // For NSPanel (which subclasses NSWindow), allocated objects are not
+  //  self-owned.
+  // FIXME: For now we don't track NSPanel objects for the same reason
+  //   as for NSWindow objects.
+  addClassMethSummary("NSPanel", "alloc", NoTrackYet);
+
+  // Don't track allocated autorelease pools, as it is okay to prematurely
+  // exit a method.
+  addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet);
+  addClassMethSummary("NSAutoreleasePool", "allocWithZone", NoTrackYet, false);
+  addClassMethSummary("NSAutoreleasePool", "new", NoTrackYet);
+
+  // Create summaries for QCRenderer/QCView -createSnapshotImageOfType:
+  addInstMethSummary("QCRenderer", AllocSumm,
+                     "createSnapshotImageOfType", NULL);
+  addInstMethSummary("QCView", AllocSumm,
+                     "createSnapshotImageOfType", NULL);
+
+  // Create summaries for CIContext, 'createCGImage' and
+  // 'createCGLayerWithSize'.  These objects are CF objects, and are not
+  // automatically garbage collected.
+  addInstMethSummary("CIContext", CFAllocSumm,
+                     "createCGImage", "fromRect", NULL);
+  addInstMethSummary("CIContext", CFAllocSumm,
+                     "createCGImage", "fromRect", "format", "colorSpace", NULL);
+  addInstMethSummary("CIContext", CFAllocSumm, "createCGLayerWithSize",
+           "info", NULL);
+}
+
+//===----------------------------------------------------------------------===//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+namespace {
+  typedef llvm::DenseMap<const ExplodedNode *, const RetainSummary *>
+    SummaryLogTy;
+
+  //===-------------===//
+  // Bug Descriptions. //
+  //===-------------===//
+
+  class CFRefBug : public BugType {
+  protected:
+    CFRefBug(StringRef name)
+    : BugType(name, categories::MemoryCoreFoundationObjectiveC) {}
+  public:
+
+    // FIXME: Eventually remove.
+    virtual const char *getDescription() const = 0;
+
+    virtual bool isLeak() const { return false; }
+  };
+
+  class UseAfterRelease : public CFRefBug {
+  public:
+    UseAfterRelease() : CFRefBug("Use-after-release") {}
+
+    const char *getDescription() const {
+      return "Reference-counted object is used after it is released";
+    }
+  };
+
+  class BadRelease : public CFRefBug {
+  public:
+    BadRelease() : CFRefBug("Bad release") {}
+
+    const char *getDescription() const {
+      return "Incorrect decrement of the reference count of an object that is "
+             "not owned at this point by the caller";
+    }
+  };
+
+  class DeallocGC : public CFRefBug {
+  public:
+    DeallocGC()
+    : CFRefBug("-dealloc called while using garbage collection") {}
+
+    const char *getDescription() const {
+      return "-dealloc called while using garbage collection";
+    }
+  };
+
+  class DeallocNotOwned : public CFRefBug {
+  public:
+    DeallocNotOwned()
+    : CFRefBug("-dealloc sent to non-exclusively owned object") {}
+
+    const char *getDescription() const {
+      return "-dealloc sent to object that may be referenced elsewhere";
+    }
+  };
+
+  class OverAutorelease : public CFRefBug {
+  public:
+    OverAutorelease()
+    : CFRefBug("Object autoreleased too many times") {}
+
+    const char *getDescription() const {
+      return "Object autoreleased too many times";
+    }
+  };
+
+  class ReturnedNotOwnedForOwned : public CFRefBug {
+  public:
+    ReturnedNotOwnedForOwned()
+    : CFRefBug("Method should return an owned object") {}
+
+    const char *getDescription() const {
+      return "Object with a +0 retain count returned to caller where a +1 "
+             "(owning) retain count is expected";
+    }
+  };
+
+  class Leak : public CFRefBug {
+  public:
+    Leak(StringRef name)
+    : CFRefBug(name) {
+      // Leaks should not be reported if they are post-dominated by a sink.
+      setSuppressOnSink(true);
+    }
+
+    const char *getDescription() const { return ""; }
+
+    bool isLeak() const { return true; }
+  };
+
+  //===---------===//
+  // Bug Reports.  //
+  //===---------===//
+
+  class CFRefReportVisitor : public BugReporterVisitorImpl<CFRefReportVisitor> {
+  protected:
+    SymbolRef Sym;
+    const SummaryLogTy &SummaryLog;
+    bool GCEnabled;
+    
+  public:
+    CFRefReportVisitor(SymbolRef sym, bool gcEnabled, const SummaryLogTy &log)
+       : Sym(sym), SummaryLog(log), GCEnabled(gcEnabled) {}
+
+    virtual void Profile(llvm::FoldingSetNodeID &ID) const {
+      static int x = 0;
+      ID.AddPointer(&x);
+      ID.AddPointer(Sym);
+    }
+
+    virtual PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+                                           const ExplodedNode *PrevN,
+                                           BugReporterContext &BRC,
+                                           BugReport &BR);
+
+    virtual PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
+                                            const ExplodedNode *N,
+                                            BugReport &BR);
+  };
+
+  class CFRefLeakReportVisitor : public CFRefReportVisitor {
+  public:
+    CFRefLeakReportVisitor(SymbolRef sym, bool GCEnabled,
+                           const SummaryLogTy &log)
+       : CFRefReportVisitor(sym, GCEnabled, log) {}
+
+    PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
+                                    const ExplodedNode *N,
+                                    BugReport &BR);
+
+    virtual BugReporterVisitor *clone() const {
+      // The curiously-recurring template pattern only works for one level of
+      // subclassing. Rather than make a new template base for
+      // CFRefReportVisitor, we simply override clone() to do the right thing.
+      // This could be trouble someday if BugReporterVisitorImpl is ever
+      // used for something else besides a convenient implementation of clone().
+      return new CFRefLeakReportVisitor(*this);
+    }
+  };
+
+  class CFRefReport : public BugReport {
+    void addGCModeDescription(const LangOptions &LOpts, bool GCEnabled);
+
+  public:
+    CFRefReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
+                const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
+                bool registerVisitor = true)
+      : BugReport(D, D.getDescription(), n) {
+      if (registerVisitor)
+        addVisitor(new CFRefReportVisitor(sym, GCEnabled, Log));
+      addGCModeDescription(LOpts, GCEnabled);
+    }
+
+    CFRefReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
+                const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
+                StringRef endText)
+      : BugReport(D, D.getDescription(), endText, n) {
+      addVisitor(new CFRefReportVisitor(sym, GCEnabled, Log));
+      addGCModeDescription(LOpts, GCEnabled);
+    }
+
+    virtual std::pair<ranges_iterator, ranges_iterator> getRanges() {
+      const CFRefBug& BugTy = static_cast<CFRefBug&>(getBugType());
+      if (!BugTy.isLeak())
+        return BugReport::getRanges();
+      else
+        return std::make_pair(ranges_iterator(), ranges_iterator());
+    }
+  };
+
+  class CFRefLeakReport : public CFRefReport {
+    const MemRegion* AllocBinding;
+  public:
+    CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
+                    const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
+                    CheckerContext &Ctx,
+                    bool IncludeAllocationLine);
+
+    PathDiagnosticLocation getLocation(const SourceManager &SM) const {
+      assert(Location.isValid());
+      return Location;
+    }
+  };
+} // end anonymous namespace
+
+void CFRefReport::addGCModeDescription(const LangOptions &LOpts,
+                                       bool GCEnabled) {
+  const char *GCModeDescription = 0;
+
+  switch (LOpts.getGC()) {
+  case LangOptions::GCOnly:
+    assert(GCEnabled);
+    GCModeDescription = "Code is compiled to only use garbage collection";
+    break;
+
+  case LangOptions::NonGC:
+    assert(!GCEnabled);
+    GCModeDescription = "Code is compiled to use reference counts";
+    break;
+
+  case LangOptions::HybridGC:
+    if (GCEnabled) {
+      GCModeDescription = "Code is compiled to use either garbage collection "
+                          "(GC) or reference counts (non-GC).  The bug occurs "
+                          "with GC enabled";
+      break;
+    } else {
+      GCModeDescription = "Code is compiled to use either garbage collection "
+                          "(GC) or reference counts (non-GC).  The bug occurs "
+                          "in non-GC mode";
+      break;
+    }
+  }
+
+  assert(GCModeDescription && "invalid/unknown GC mode");
+  addExtraText(GCModeDescription);
+}
+
+// FIXME: This should be a method on SmallVector.
+static inline bool contains(const SmallVectorImpl<ArgEffect>& V,
+                            ArgEffect X) {
+  for (SmallVectorImpl<ArgEffect>::const_iterator I=V.begin(), E=V.end();
+       I!=E; ++I)
+    if (*I == X) return true;
+
+  return false;
+}
+
+static bool isNumericLiteralExpression(const Expr *E) {
+  // FIXME: This set of cases was copied from SemaExprObjC.
+  return isa<IntegerLiteral>(E) || 
+         isa<CharacterLiteral>(E) ||
+         isa<FloatingLiteral>(E) ||
+         isa<ObjCBoolLiteralExpr>(E) ||
+         isa<CXXBoolLiteralExpr>(E);
+}
+
+PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
+                                                   const ExplodedNode *PrevN,
+                                                   BugReporterContext &BRC,
+                                                   BugReport &BR) {
+  // FIXME: We will eventually need to handle non-statement-based events
+  // (__attribute__((cleanup))).
+  if (!N->getLocation().getAs<StmtPoint>())
+    return NULL;
+
+  // Check if the type state has changed.
+  ProgramStateRef PrevSt = PrevN->getState();
+  ProgramStateRef CurrSt = N->getState();
+  const LocationContext *LCtx = N->getLocationContext();
+
+  const RefVal* CurrT = getRefBinding(CurrSt, Sym);
+  if (!CurrT) return NULL;
+
+  const RefVal &CurrV = *CurrT;
+  const RefVal *PrevT = getRefBinding(PrevSt, Sym);
+
+  // Create a string buffer to contain all the useful things we want
+  // to tell the user.
+  std::string sbuf;
+  llvm::raw_string_ostream os(sbuf);
+
+  // This is the allocation site since the previous node had no bindings
+  // for this symbol.
+  if (!PrevT) {
+    const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
+
+    if (isa<ObjCArrayLiteral>(S)) {
+      os << "NSArray literal is an object with a +0 retain count";
+    }
+    else if (isa<ObjCDictionaryLiteral>(S)) {
+      os << "NSDictionary literal is an object with a +0 retain count";
+    }
+    else if (const ObjCBoxedExpr *BL = dyn_cast<ObjCBoxedExpr>(S)) {
+      if (isNumericLiteralExpression(BL->getSubExpr()))
+        os << "NSNumber literal is an object with a +0 retain count";
+      else {
+        const ObjCInterfaceDecl *BoxClass = 0;
+        if (const ObjCMethodDecl *Method = BL->getBoxingMethod())
+          BoxClass = Method->getClassInterface();
+
+        // We should always be able to find the boxing class interface,
+        // but consider this future-proofing.
+        if (BoxClass)
+          os << *BoxClass << " b";
+        else
+          os << "B";
+
+        os << "oxed expression produces an object with a +0 retain count";
+      }
+    }
+    else {      
+      if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+        // Get the name of the callee (if it is available).
+        SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee(), LCtx);
+        if (const FunctionDecl *FD = X.getAsFunctionDecl())
+          os << "Call to function '" << *FD << '\'';
+        else
+          os << "function call";
+      }
+      else {
+        assert(isa<ObjCMessageExpr>(S));
+        CallEventManager &Mgr = CurrSt->getStateManager().getCallEventManager();
+        CallEventRef<ObjCMethodCall> Call
+          = Mgr.getObjCMethodCall(cast<ObjCMessageExpr>(S), CurrSt, LCtx);
+
+        switch (Call->getMessageKind()) {
+        case OCM_Message:
+          os << "Method";
+          break;
+        case OCM_PropertyAccess:
+          os << "Property";
+          break;
+        case OCM_Subscript:
+          os << "Subscript";
+          break;
+        }
+      }
+
+      if (CurrV.getObjKind() == RetEffect::CF) {
+        os << " returns a Core Foundation object with a ";
+      }
+      else {
+        assert (CurrV.getObjKind() == RetEffect::ObjC);
+        os << " returns an Objective-C object with a ";
+      }
+
+      if (CurrV.isOwned()) {
+        os << "+1 retain count";
+
+        if (GCEnabled) {
+          assert(CurrV.getObjKind() == RetEffect::CF);
+          os << ".  "
+          "Core Foundation objects are not automatically garbage collected.";
+        }
+      }
+      else {
+        assert (CurrV.isNotOwned());
+        os << "+0 retain count";
+      }
+    }
+
+    PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+                                  N->getLocationContext());
+    return new PathDiagnosticEventPiece(Pos, os.str());
+  }
+
+  // Gather up the effects that were performed on the object at this
+  // program point.
+  SmallVector<ArgEffect, 2> AEffects;
+
+  const ExplodedNode *OrigNode = BRC.getNodeResolver().getOriginalNode(N);
+  if (const RetainSummary *Summ = SummaryLog.lookup(OrigNode)) {
+    // We only have summaries attached to nodes after evaluating CallExpr and
+    // ObjCMessageExprs.
+    const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
+
+    if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+      // Iterate through the parameter expressions and see if the symbol
+      // was ever passed as an argument.
+      unsigned i = 0;
+
+      for (CallExpr::const_arg_iterator AI=CE->arg_begin(), AE=CE->arg_end();
+           AI!=AE; ++AI, ++i) {
+
+        // Retrieve the value of the argument.  Is it the symbol
+        // we are interested in?
+        if (CurrSt->getSValAsScalarOrLoc(*AI, LCtx).getAsLocSymbol() != Sym)
+          continue;
+
+        // We have an argument.  Get the effect!
+        AEffects.push_back(Summ->getArg(i));
+      }
+    }
+    else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
+      if (const Expr *receiver = ME->getInstanceReceiver())
+        if (CurrSt->getSValAsScalarOrLoc(receiver, LCtx)
+              .getAsLocSymbol() == Sym) {
+          // The symbol we are tracking is the receiver.
+          AEffects.push_back(Summ->getReceiverEffect());
+        }
+    }
+  }
+
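+  // Use a do/while(0) so the special cases below can 'break' out once they
+  // have composed their diagnostic message.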
+  do {
+    // Get the previous type state.
+    RefVal PrevV = *PrevT;
+
+    // Specially handle -dealloc.
+    if (!GCEnabled && contains(AEffects, Dealloc)) {
+      // Determine if the object's reference count was pushed to zero.
+      assert(!(PrevV == CurrV) && "The typestate *must* have changed.");
+      // We may not have transitioned to 'release' if we hit an error.
+      // This case is handled elsewhere.
+      if (CurrV.getKind() == RefVal::Released) {
+        assert(CurrV.getCombinedCounts() == 0);
+        os << "Object released by directly sending the '-dealloc' message";
+        break;
+      }
+    }
+
+    // Specially handle CFMakeCollectable and friends.
+    if (contains(AEffects, MakeCollectable)) {
+      // Get the name of the function.
+      const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
+      SVal X =
+        CurrSt->getSValAsScalarOrLoc(cast<CallExpr>(S)->getCallee(), LCtx);
+      const FunctionDecl *FD = X.getAsFunctionDecl();
+
+      if (GCEnabled) {
+        // Determine if the object's reference count was pushed to zero.
+        assert(!(PrevV == CurrV) && "The typestate *must* have changed.");
+
+        os << "In GC mode a call to '" << *FD
+        <<  "' decrements an object's retain count and registers the "
+        "object with the garbage collector. ";
+
+        if (CurrV.getKind() == RefVal::Released) {
+          assert(CurrV.getCount() == 0);
+          os << "Since it now has a 0 retain count the object can be "
+          "automatically collected by the garbage collector.";
+        }
+        else
+          os << "An object must have a 0 retain count to be garbage collected. "
+          "After this call its retain count is +" << CurrV.getCount()
+          << '.';
+      }
+      else
+        os << "When GC is not enabled a call to '" << *FD
+        << "' has no effect on its argument.";
+
+      // Nothing more to say.
+      break;
+    }
+
+    // Determine if the typestate has changed.
+    if (!(PrevV == CurrV))
+      switch (CurrV.getKind()) {
+        case RefVal::Owned:
+        case RefVal::NotOwned:
+
+          if (PrevV.getCount() == CurrV.getCount()) {
+            // Did an autorelease message get sent?
+            if (PrevV.getAutoreleaseCount() == CurrV.getAutoreleaseCount())
+              return 0;
+
+            assert(PrevV.getAutoreleaseCount() < CurrV.getAutoreleaseCount());
+            os << "Object autoreleased";
+            break;
+          }
+
+          if (PrevV.getCount() > CurrV.getCount())
+            os << "Reference count decremented.";
+          else
+            os << "Reference count incremented.";
+
+          if (unsigned Count = CurrV.getCount())
+            os << " The object now has a +" << Count << " retain count.";
+
+          if (PrevV.getKind() == RefVal::Released) {
+            assert(GCEnabled && CurrV.getCount() > 0);
+            os << " The object is not eligible for garbage collection until "
+                  "the retain count reaches 0 again.";
+          }
+
+          break;
+
+        case RefVal::Released:
+          os << "Object released.";
+          break;
+
+        case RefVal::ReturnedOwned:
+          // Autoreleases can be applied after marking a node ReturnedOwned.
+          if (CurrV.getAutoreleaseCount())
+            return NULL;
+
+          os << "Object returned to caller as an owning reference (single "
+                "retain count transferred to caller)";
+          break;
+
+        case RefVal::ReturnedNotOwned:
+          os << "Object returned to caller with a +0 retain count";
+          break;
+
+        default:
+          return NULL;
+      }
+
+    // Emit any remaining diagnostics for the argument effects (if any).
+    for (SmallVectorImpl<ArgEffect>::iterator I=AEffects.begin(),
+         E=AEffects.end(); I != E; ++I) {
+
+      // A bunch of things have alternate behavior under GC.
+      if (GCEnabled)
+        switch (*I) {
+          default: break;
+          case Autorelease:
+            os << "In GC mode an 'autorelease' has no effect.";
+            continue;
+          case IncRefMsg:
+            os << "In GC mode the 'retain' message has no effect.";
+            continue;
+          case DecRefMsg:
+            os << "In GC mode the 'release' message has no effect.";
+            continue;
+        }
+    }
+  } while (0);
+
+  if (os.str().empty())
+    return 0; // We have nothing to say!
+
+  const Stmt *S = N->getLocation().castAs<StmtPoint>().getStmt();
+  PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+                                N->getLocationContext());
+  PathDiagnosticPiece *P = new PathDiagnosticEventPiece(Pos, os.str());
+
+  // Add the range by scanning the children of the statement for any bindings
+  // to Sym.
+  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+       I!=E; ++I)
+    if (const Expr *Exp = dyn_cast_or_null<Expr>(*I))
+      if (CurrSt->getSValAsScalarOrLoc(Exp, LCtx).getAsLocSymbol() == Sym) {
+        P->addRange(Exp->getSourceRange());
+        break;
+      }
+
+  return P;
+}
+
+// Find the first node in the current function context that referred to the
+// tracked symbol and the memory location that value was stored to. Note, the
+// value is only reported if the allocation occurred in the same function as
+// the leak. The function can also return a location context, which should be
+// treated as interesting.
+struct AllocationInfo {
+  const ExplodedNode* N;
+  const MemRegion *R;
+  const LocationContext *InterestingMethodContext;
+  AllocationInfo(const ExplodedNode *InN,
+                 const MemRegion *InR,
+                 const LocationContext *InInterestingMethodContext) :
+    N(InN), R(InR), InterestingMethodContext(InInterestingMethodContext) {}
+};
+
+static AllocationInfo
+GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
+                  SymbolRef Sym) {
+  const ExplodedNode *AllocationNode = N;
+  const ExplodedNode *AllocationNodeInCurrentContext = N;
+  const MemRegion* FirstBinding = 0;
+  const LocationContext *LeakContext = N->getLocationContext();
+
+  // The location context of the init method called on the leaked object, if
+  // available.
+  const LocationContext *InitMethodContext = 0;
+
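+  // Walk backwards through the predecessors while the symbol is still
+  // tracked, remembering the earliest node (and binding) that refers to it.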
+  while (N) {
+    ProgramStateRef St = N->getState();
+    const LocationContext *NContext = N->getLocationContext();
+
+    if (!getRefBinding(St, Sym))
+      break;
+
+    StoreManager::FindUniqueBinding FB(Sym);
+    StateMgr.iterBindings(St, FB);
+    
+    if (FB) {
+      const MemRegion *R = FB.getRegion();
+      const VarRegion *VR = R->getBaseRegion()->getAs<VarRegion>();
+      // Do not show local variables belonging to a function other than
+      // where the error is reported.
+      if (!VR || VR->getStackFrame() == LeakContext->getCurrentStackFrame())
+        FirstBinding = R;
+    }
+
+    // AllocationNode is the last node in which the symbol was tracked.
+    AllocationNode = N;
+
+    // AllocationNodeInCurrentContext is the last node in the current context
+    // in which the symbol was tracked.
+    if (NContext == LeakContext)
+      AllocationNodeInCurrentContext = N;
+
+    // Find the last init that was called on the given symbol and store the
+    // init method's location context.
+    if (!InitMethodContext)
+      if (Optional<CallEnter> CEP = N->getLocation().getAs<CallEnter>()) {
+        const Stmt *CE = CEP->getCallExpr();
+        if (const ObjCMessageExpr *ME = dyn_cast_or_null<ObjCMessageExpr>(CE)) {
+          const Stmt *RecExpr = ME->getInstanceReceiver();
+          if (RecExpr) {
+            SVal RecV = St->getSVal(RecExpr, NContext);
+            if (ME->getMethodFamily() == OMF_init && RecV.getAsSymbol() == Sym)
+              InitMethodContext = CEP->getCalleeContext();
+          }
+        }
+      }
+
+    N = N->pred_empty() ? NULL : *(N->pred_begin());
+  }
+
+  // If we are reporting a leak of the object that was allocated with alloc,
+  // mark its init method as interesting.
+  const LocationContext *InterestingMethodContext = 0;
+  if (InitMethodContext) {
+    const ProgramPoint AllocPP = AllocationNode->getLocation();
+    if (Optional<StmtPoint> SP = AllocPP.getAs<StmtPoint>())
+      if (const ObjCMessageExpr *ME = SP->getStmtAs<ObjCMessageExpr>())
+        if (ME->getMethodFamily() == OMF_alloc)
+          InterestingMethodContext = InitMethodContext;
+  }
+
+  // If allocation happened in a function different from the leak node context,
+  // do not report the binding.
+  assert(N && "Could not find allocation node");
+  if (N->getLocationContext() != LeakContext) {
+    FirstBinding = 0;
+  }
+
+  return AllocationInfo(AllocationNodeInCurrentContext,
+                        FirstBinding,
+                        InterestingMethodContext);
+}
+
+PathDiagnosticPiece*
+CFRefReportVisitor::getEndPath(BugReporterContext &BRC,
+                               const ExplodedNode *EndN,
+                               BugReport &BR) {
+  BR.markInteresting(Sym);
+  return BugReporterVisitor::getDefaultEndPath(BRC, EndN, BR);
+}
+
+PathDiagnosticPiece*
+CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
+                                   const ExplodedNode *EndN,
+                                   BugReport &BR) {
+
+  // Tell the BugReporterContext to report cases when the tracked symbol is
+  // assigned to different variables, etc.
+  BR.markInteresting(Sym);
+
+  // We are reporting a leak.  Walk up the graph to get to the first node where
+  // the symbol appeared, and also get the first VarDecl that the tracked
+  // object is stored to.
+  AllocationInfo AllocI =
+    GetAllocationSite(BRC.getStateManager(), EndN, Sym);
+
+  const MemRegion* FirstBinding = AllocI.R;
+  BR.markInteresting(AllocI.InterestingMethodContext);
+
+  SourceManager& SM = BRC.getSourceManager();
+
+  // Compute an actual location for the leak.  Sometimes a leak doesn't
+  // occur at an actual statement (e.g., transition between blocks; end
+  // of function) so we need to walk the graph and compute a real location.
+  const ExplodedNode *LeakN = EndN;
+  PathDiagnosticLocation L = PathDiagnosticLocation::createEndOfPath(LeakN, SM);
+
+  std::string sbuf;
+  llvm::raw_string_ostream os(sbuf);
+
+  os << "Object leaked: ";
+
+  if (FirstBinding) {
+    os << "object allocated and stored into '"
+       << FirstBinding->getString() << '\'';
+  }
+  else
+    os << "allocated object";
+
+  // Get the retain count.
+  const RefVal* RV = getRefBinding(EndN->getState(), Sym);
+  assert(RV);
+
+  if (RV->getKind() == RefVal::ErrorLeakReturned) {
+    // FIXME: Per comments in rdar://6320065, "create" only applies to CF
+    // objects.  Only "copy", "alloc", "retain" and "new" transfer ownership
+    // to the caller for NS objects.
+    const Decl *D = &EndN->getCodeDecl();
+    
+    os << (isa<ObjCMethodDecl>(D) ? " is returned from a method "
+                                  : " is returned from a function ");
+    
+    if (D->getAttr<CFReturnsNotRetainedAttr>())
+      os << "that is annotated as CF_RETURNS_NOT_RETAINED";
+    else if (D->getAttr<NSReturnsNotRetainedAttr>())
+      os << "that is annotated as NS_RETURNS_NOT_RETAINED";
+    else {
+      if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+        os << "whose name ('" << MD->getSelector().getAsString()
+           << "') does not start with 'copy', 'mutableCopy', 'alloc' or 'new'."
+              "  This violates the naming convention rules"
+              " given in the Memory Management Guide for Cocoa";
+      }
+      else {
+        const FunctionDecl *FD = cast<FunctionDecl>(D);
+        os << "whose name ('" << *FD
+           << "') does not contain 'Copy' or 'Create'.  This violates the naming"
+              " convention rules given in the Memory Management Guide for Core"
+              " Foundation";
+      }
+    }
+  }
+  else if (RV->getKind() == RefVal::ErrorGCLeakReturned) {
+    const ObjCMethodDecl &MD = cast<ObjCMethodDecl>(EndN->getCodeDecl());
+    os << " and returned from method '" << MD.getSelector().getAsString()
+       << "' is potentially leaked when using garbage collection.  Callers "
+          "of this method do not expect a returned object with a +1 retain "
+          "count since they expect the object to be managed by the garbage "
+          "collector";
+  }
+  else
+    os << " is not referenced later in this execution path and has a retain "
+          "count of +" << RV->getCount();
+
+  return new PathDiagnosticEventPiece(L, os.str());
+}
+
+CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
+                                 bool GCEnabled, const SummaryLogTy &Log, 
+                                 ExplodedNode *n, SymbolRef sym,
+                                 CheckerContext &Ctx,
+                                 bool IncludeAllocationLine)
+  : CFRefReport(D, LOpts, GCEnabled, Log, n, sym, false) {
+
+  // Most bug reports are cached at the location where they occurred.
+  // With leaks, we want to unique them by the location where they were
+  // allocated, and only report a single path.  To do this, we need to find
+  // the allocation site of a piece of tracked memory, which we do via a
+  // call to GetAllocationSite.  This will walk the ExplodedGraph backwards.
+  // Note that this is *not* the trimmed graph; we are guaranteed, however,
+  // that all ancestor nodes that represent the allocation site have the
+  // same SourceLocation.
+  const ExplodedNode *AllocNode = 0;
+
+  const SourceManager& SMgr = Ctx.getSourceManager();
+
+  AllocationInfo AllocI =
+    GetAllocationSite(Ctx.getStateManager(), getErrorNode(), sym);
+
+  AllocNode = AllocI.N;
+  AllocBinding = AllocI.R;
+  markInteresting(AllocI.InterestingMethodContext);
+
+  // Get the SourceLocation for the allocation site.
+  // FIXME: This will crash the analyzer if an allocation comes from an
+  // implicit call. (Currently there are no such allocations in Cocoa, though.)
+  const Stmt *AllocStmt;
+  ProgramPoint P = AllocNode->getLocation();
+  if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>())
+    AllocStmt = Exit->getCalleeContext()->getCallSite();
+  else
+    AllocStmt = P.castAs<PostStmt>().getStmt();
+  assert(AllocStmt && "All allocations must come from explicit calls");
+
+  PathDiagnosticLocation AllocLocation =
+    PathDiagnosticLocation::createBegin(AllocStmt, SMgr,
+                                        AllocNode->getLocationContext());
+  Location = AllocLocation;
+
+  // Set the uniquing info, which will be used to unique the bug reports. The
+  // leaks should be uniqued on the allocation site.
+  UniqueingLocation = AllocLocation;
+  UniqueingDecl = AllocNode->getLocationContext()->getDecl();
+
+  // Fill in the description of the bug.
+  Description.clear();
+  llvm::raw_string_ostream os(Description);
+  os << "Potential leak ";
+  if (GCEnabled)
+    os << "(when using garbage collection) ";
+  os << "of an object";
+
+  if (AllocBinding) {
+    os << " stored into '" << AllocBinding->getString() << '\'';
+    if (IncludeAllocationLine) {
+      FullSourceLoc SL(AllocStmt->getLocStart(), Ctx.getSourceManager());
+      os << " (allocated on line " << SL.getSpellingLineNumber() << ")";
+    }
+  }
+
+  addVisitor(new CFRefLeakReportVisitor(sym, GCEnabled, Log));
+}
+
+//===----------------------------------------------------------------------===//
+// Main checker logic.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RetainCountChecker
+  : public Checker< check::Bind,
+                    check::DeadSymbols,
+                    check::EndAnalysis,
+                    check::EndFunction,
+                    check::PostStmt<BlockExpr>,
+                    check::PostStmt<CastExpr>,
+                    check::PostStmt<ObjCArrayLiteral>,
+                    check::PostStmt<ObjCDictionaryLiteral>,
+                    check::PostStmt<ObjCBoxedExpr>,
+                    check::PostCall,
+                    check::PreStmt<ReturnStmt>,
+                    check::RegionChanges,
+                    eval::Assume,
+                    eval::Call > {
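+  // Bug types reported by this checker; they are created lazily on first use
+  // and reused across reports.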
+  mutable OwningPtr<CFRefBug> useAfterRelease, releaseNotOwned;
+  mutable OwningPtr<CFRefBug> deallocGC, deallocNotOwned;
+  mutable OwningPtr<CFRefBug> overAutorelease, returnNotOwnedForOwned;
+  mutable OwningPtr<CFRefBug> leakWithinFunction, leakAtReturn;
+  mutable OwningPtr<CFRefBug> leakWithinFunctionGC, leakAtReturnGC;
+
+  typedef llvm::DenseMap<SymbolRef, const SimpleProgramPointTag *> SymbolTagMap;
+
+  // This map is only used to ensure proper deletion of any allocated tags.
+  mutable SymbolTagMap DeadSymbolTags;
+
+  mutable OwningPtr<RetainSummaryManager> Summaries;
+  mutable OwningPtr<RetainSummaryManager> SummariesGC;
+  mutable SummaryLogTy SummaryLog;
+  mutable bool ShouldResetSummaryLog;
+
+  /// Optional setting to indicate if leak reports should include
+  /// the allocation line.
+  mutable bool IncludeAllocationLine;
+
+public:  
+  RetainCountChecker(AnalyzerOptions &AO)
+    : ShouldResetSummaryLog(false),
+      IncludeAllocationLine(shouldIncludeAllocationSiteInLeakDiagnostics(AO)) {}
+
+  virtual ~RetainCountChecker() {
+    DeleteContainerSeconds(DeadSymbolTags);
+  }
+
+  void checkEndAnalysis(ExplodedGraph &G, BugReporter &BR,
+                        ExprEngine &Eng) const {
+    // FIXME: This is a hack to make sure the summary log gets cleared between
+    // analyses of different code bodies.
+    //
+    // Why is this necessary? Because a checker's lifetime is tied to a
+    // translation unit, but an ExplodedGraph's lifetime is just a code body.
+    // Once in a blue moon, a new ExplodedNode will have the same address as an
+    // old one with an associated summary, and the bug report visitor gets very
+    // confused. (To make things worse, the summary lifetime is currently also
+    // tied to a code body, so we get a crash instead of incorrect results.)
+    //
+    // Why is this a bad solution? Because if the lifetime of the ExplodedGraph
+    // changes, things will start going wrong again. Really the lifetime of this
+    // log needs to be tied to either the specific nodes in it or the entire
+    // ExplodedGraph, not to a specific part of the code being analyzed.
+    //
+    // (Also, having stateful local data means that the same checker can't be
+    // used from multiple threads, but a lot of checkers have incorrect
+    // assumptions about that anyway. So that wasn't a priority at the time of
+    // this fix.)
+    //
+    // This happens at the end of analysis, but bug reports are emitted /after/
+    // this point. So we can't just clear the summary log now. Instead, we mark
+    // that the next time we access the summary log, it should be cleared.
+
+    // If we never reset the summary log during /this/ code body analysis,
+    // there were no new summaries. There might still have been summaries from
+    // the /last/ analysis, so clear them out to make sure the bug report
+    // visitors don't get confused.
+    if (ShouldResetSummaryLog)
+      SummaryLog.clear();
+
+    ShouldResetSummaryLog = !SummaryLog.empty();
+  }
+
+  CFRefBug *getLeakWithinFunctionBug(const LangOptions &LOpts,
+                                     bool GCEnabled) const {
+    if (GCEnabled) {
+      if (!leakWithinFunctionGC)
+        leakWithinFunctionGC.reset(new Leak("Leak of object when using "
+                                             "garbage collection"));
+      return leakWithinFunctionGC.get();
+    } else {
+      if (!leakWithinFunction) {
+        if (LOpts.getGC() == LangOptions::HybridGC) {
+          leakWithinFunction.reset(new Leak("Leak of object when not using "
+                                            "garbage collection (GC) in "
+                                            "dual GC/non-GC code"));
+        } else {
+          leakWithinFunction.reset(new Leak("Leak"));
+        }
+      }
+      return leakWithinFunction.get();
+    }
+  }
+
+  CFRefBug *getLeakAtReturnBug(const LangOptions &LOpts, bool GCEnabled) const {
+    if (GCEnabled) {
+      if (!leakAtReturnGC)
+        leakAtReturnGC.reset(new Leak("Leak of returned object when using "
+                                      "garbage collection"));
+      return leakAtReturnGC.get();
+    } else {
+      if (!leakAtReturn) {
+        if (LOpts.getGC() == LangOptions::HybridGC) {
+          leakAtReturn.reset(new Leak("Leak of returned object when not using "
+                                      "garbage collection (GC) in dual "
+                                      "GC/non-GC code"));
+        } else {
+          leakAtReturn.reset(new Leak("Leak of returned object"));
+        }
+      }
+      return leakAtReturn.get();
+    }
+  }
+
+  RetainSummaryManager &getSummaryManager(ASTContext &Ctx,
+                                          bool GCEnabled) const {
+    // FIXME: We don't support ARC being turned on and off during one analysis.
+    // (nor, for that matter, do we support changing ASTContexts)
+    bool ARCEnabled = (bool)Ctx.getLangOpts().ObjCAutoRefCount;
+    if (GCEnabled) {
+      if (!SummariesGC)
+        SummariesGC.reset(new RetainSummaryManager(Ctx, true, ARCEnabled));
+      else
+        assert(SummariesGC->isARCEnabled() == ARCEnabled);
+      return *SummariesGC;
+    } else {
+      if (!Summaries)
+        Summaries.reset(new RetainSummaryManager(Ctx, false, ARCEnabled));
+      else
+        assert(Summaries->isARCEnabled() == ARCEnabled);
+      return *Summaries;
+    }
+  }
+
+  RetainSummaryManager &getSummaryManager(CheckerContext &C) const {
+    return getSummaryManager(C.getASTContext(), C.isObjCGCEnabled());
+  }
+
+  void printState(raw_ostream &Out, ProgramStateRef State,
+                  const char *NL, const char *Sep) const;
+
+  void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
+  void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
+  void checkPostStmt(const CastExpr *CE, CheckerContext &C) const;
+
+  void checkPostStmt(const ObjCArrayLiteral *AL, CheckerContext &C) const;
+  void checkPostStmt(const ObjCDictionaryLiteral *DL, CheckerContext &C) const;
+  void checkPostStmt(const ObjCBoxedExpr *BE, CheckerContext &C) const;
+
+  void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+                      
+  void checkSummary(const RetainSummary &Summ, const CallEvent &Call,
+                    CheckerContext &C) const;
+
+  void processSummaryOfInlined(const RetainSummary &Summ,
+                               const CallEvent &Call,
+                               CheckerContext &C) const;
+
+  bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+
+  ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
+                                 bool Assumption) const;
+
+  ProgramStateRef 
+  checkRegionChanges(ProgramStateRef state,
+                     const InvalidatedSymbols *invalidated,
+                     ArrayRef<const MemRegion *> ExplicitRegions,
+                     ArrayRef<const MemRegion *> Regions,
+                     const CallEvent *Call) const;
+                                        
+  bool wantsRegionChangeUpdate(ProgramStateRef state) const {
+    return true;
+  }
+
+  void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
+  void checkReturnWithRetEffect(const ReturnStmt *S, CheckerContext &C,
+                                ExplodedNode *Pred, RetEffect RE, RefVal X,
+                                SymbolRef Sym, ProgramStateRef state) const;
+                                              
+  void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+  void checkEndFunction(CheckerContext &C) const;
+
+  ProgramStateRef updateSymbol(ProgramStateRef state, SymbolRef sym,
+                               RefVal V, ArgEffect E, RefVal::Kind &hasErr,
+                               CheckerContext &C) const;
+
+  void processNonLeakError(ProgramStateRef St, SourceRange ErrorRange,
+                           RefVal::Kind ErrorKind, SymbolRef Sym,
+                           CheckerContext &C) const;
+                      
+  void processObjCLiterals(CheckerContext &C, const Expr *Ex) const;
+
+  const ProgramPointTag *getDeadSymbolTag(SymbolRef sym) const;
+
+  ProgramStateRef handleSymbolDeath(ProgramStateRef state,
+                                    SymbolRef sid, RefVal V,
+                                    SmallVectorImpl<SymbolRef> &Leaked) const;
+
+  ProgramStateRef
+  handleAutoreleaseCounts(ProgramStateRef state, ExplodedNode *Pred,
+                          const ProgramPointTag *Tag, CheckerContext &Ctx,
+                          SymbolRef Sym, RefVal V) const;
+
+  ExplodedNode *processLeaks(ProgramStateRef state,
+                             SmallVectorImpl<SymbolRef> &Leaked,
+                             CheckerContext &Ctx,
+                             ExplodedNode *Pred = 0) const;
+};
+} // end anonymous namespace
+
+namespace {
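+/// Visitor used with ProgramState::scanReachableSymbols() to stop tracking
+/// (i.e. remove the RefBindings entry for) every symbol reachable from the
+/// scanned values or regions.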
+class StopTrackingCallback : public SymbolVisitor {
+  ProgramStateRef state;
+public:
+  StopTrackingCallback(ProgramStateRef st) : state(st) {}
+  ProgramStateRef getState() const { return state; }
+
+  bool VisitSymbol(SymbolRef sym) {
+    state = state->remove<RefBindings>(sym);
+    return true;
+  }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Handle statements that may have an effect on refcounts.
+//===----------------------------------------------------------------------===//
+
+void RetainCountChecker::checkPostStmt(const BlockExpr *BE,
+                                       CheckerContext &C) const {
+
+  // Scan the block's captured variables for any object the retain count
+  // checker may be tracking.
+  if (!BE->getBlockDecl()->hasCaptures())
+    return;
+
+  ProgramStateRef state = C.getState();
+  const BlockDataRegion *R =
+    cast<BlockDataRegion>(state->getSVal(BE,
+                                         C.getLocationContext()).getAsRegion());
+
+  BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
+                                            E = R->referenced_vars_end();
+
+  if (I == E)
+    return;
+
+  // FIXME: For now we invalidate the tracking of all symbols passed to blocks
+  // via captured variables, even though captured variables result in a copy
+  // and in implicit increment/decrement of a retain count.
+  SmallVector<const MemRegion*, 10> Regions;
+  const LocationContext *LC = C.getLocationContext();
+  MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
+
+  for ( ; I != E; ++I) {
+    const VarRegion *VR = I.getCapturedRegion();
+    if (VR->getSuperRegion() == R) {
+      VR = MemMgr.getVarRegion(VR->getDecl(), LC);
+    }
+    Regions.push_back(VR);
+  }
+
+  state =
+    state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
+                                    Regions.data() + Regions.size()).getState();
+  C.addTransition(state);
+}
+
+void RetainCountChecker::checkPostStmt(const CastExpr *CE,
+                                       CheckerContext &C) const {
+  const ObjCBridgedCastExpr *BE = dyn_cast<ObjCBridgedCastExpr>(CE);
+  if (!BE)
+    return;
+  
+  ArgEffect AE = IncRef;
+  
+  switch (BE->getBridgeKind()) {
+    case clang::OBC_Bridge:
+      // Do nothing.
+      return;
+    case clang::OBC_BridgeRetained:
+      AE = IncRef;
+      break;      
+    case clang::OBC_BridgeTransfer:
+      AE = DecRefBridgedTransfered;
+      break;
+  }
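+  // Illustrative examples of the bridge kinds handled above:
+  //   NSString *s1 = (__bridge NSString *)cf;            // OBC_Bridge: no ownership change
+  //   NSString *s2 = (__bridge_transfer NSString *)cf;   // OBC_BridgeTransfer: the +1 moves to ARC
+  //   CFStringRef c = (__bridge_retained CFStringRef)ns; // OBC_BridgeRetained: the cast adds a retain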
+  
+  ProgramStateRef state = C.getState();
+  SymbolRef Sym = state->getSVal(CE, C.getLocationContext()).getAsLocSymbol();
+  if (!Sym)
+    return;
+  const RefVal* T = getRefBinding(state, Sym);
+  if (!T)
+    return;
+
+  RefVal::Kind hasErr = (RefVal::Kind) 0;
+  state = updateSymbol(state, Sym, *T, AE, hasErr, C);
+  
+  if (hasErr) {
+    // FIXME: If we get an error during a bridge cast, should we report it?
+    // Should we assert that there is no error?
+    return;
+  }
+
+  C.addTransition(state);
+}
+
+void RetainCountChecker::processObjCLiterals(CheckerContext &C,
+                                             const Expr *Ex) const {
+  ProgramStateRef state = C.getState();
+  const ExplodedNode *pred = C.getPredecessor();  
+  for (Stmt::const_child_iterator it = Ex->child_begin(), et = Ex->child_end() ;
+       it != et ; ++it) {
+    const Stmt *child = *it;
+    SVal V = state->getSVal(child, pred->getLocationContext());
+    if (SymbolRef sym = V.getAsSymbol())
+      if (const RefVal* T = getRefBinding(state, sym)) {
+        RefVal::Kind hasErr = (RefVal::Kind) 0;
+        state = updateSymbol(state, sym, *T, MayEscape, hasErr, C);
+        if (hasErr) {
+          processNonLeakError(state, child->getSourceRange(), hasErr, sym, C);
+          return;
+        }
+      }
+  }
+  
+  // Return the object as autoreleased.
+  //  RetEffect RE = RetEffect::MakeNotOwned(RetEffect::ObjC);
+  if (SymbolRef sym = 
+        state->getSVal(Ex, pred->getLocationContext()).getAsSymbol()) {
+    QualType ResultTy = Ex->getType();
+    state = setRefBinding(state, sym,
+                          RefVal::makeNotOwned(RetEffect::ObjC, ResultTy));
+  }
+  
+  C.addTransition(state);  
+}
+
+void RetainCountChecker::checkPostStmt(const ObjCArrayLiteral *AL,
+                                       CheckerContext &C) const {
+  // Apply the 'MayEscape' to all values.
+  processObjCLiterals(C, AL);
+}
+
+void RetainCountChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
+                                       CheckerContext &C) const {
+  // Apply the 'MayEscape' to all keys and values.
+  processObjCLiterals(C, DL);
+}
+
+void RetainCountChecker::checkPostStmt(const ObjCBoxedExpr *Ex,
+                                       CheckerContext &C) const {
+  const ExplodedNode *Pred = C.getPredecessor();  
+  const LocationContext *LCtx = Pred->getLocationContext();
+  ProgramStateRef State = Pred->getState();
+
+  if (SymbolRef Sym = State->getSVal(Ex, LCtx).getAsSymbol()) {
+    QualType ResultTy = Ex->getType();
+    State = setRefBinding(State, Sym,
+                          RefVal::makeNotOwned(RetEffect::ObjC, ResultTy));
+  }
+
+  C.addTransition(State);
+}
+
+void RetainCountChecker::checkPostCall(const CallEvent &Call,
+                                       CheckerContext &C) const {
+  RetainSummaryManager &Summaries = getSummaryManager(C);
+  const RetainSummary *Summ = Summaries.getSummary(Call, C.getState());
+
+  if (C.wasInlined) {
+    processSummaryOfInlined(*Summ, Call, C);
+    return;
+  }
+  checkSummary(*Summ, Call, C);
+}
+
+/// GetReturnType - Used to get the return type of a message expression or
+///  function call with the intention of affixing that type to a tracked symbol.
+///  While the return type can be queried directly from RetE, when
+///  invoking class methods we augment the return type to be that of
+///  a pointer to the class (as opposed to it just being 'id').
+// FIXME: We may be able to do this with related result types instead.
+// This function is probably overestimating.
+static QualType GetReturnType(const Expr *RetE, ASTContext &Ctx) {
+  QualType RetTy = RetE->getType();
+  // If RetE is not a message expression, just return its type.
+  // If RetE is a message expression, return its type if it is something
+  // more specific than 'id'.
+  if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(RetE))
+    if (const ObjCObjectPointerType *PT = RetTy->getAs<ObjCObjectPointerType>())
+      if (PT->isObjCQualifiedIdType() || PT->isObjCIdType() ||
+          PT->isObjCClassType()) {
+        // At this point we know the return type of the message expression is
+        // id, id<...>, or Class. If we have an ObjCInterfaceDecl, we know this
+        // is a call to a class method whose type we can resolve.  In such
+        // cases, promote the return type to XXX* (where XXX is the class).
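+        // For example, '[NSString alloc]' is declared to return 'id', but is
+        // promoted here to 'NSString *'.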
+        const ObjCInterfaceDecl *D = ME->getReceiverInterface();
+        return !D ? RetTy :
+                    Ctx.getObjCObjectPointerType(Ctx.getObjCInterfaceType(D));
+      }
+
+  return RetTy;
+}
+
+// We don't always get the exact modeling of the function with regards to the
+// retain count checker even when the function is inlined. For example, we need
+// to stop tracking the symbols which were marked with StopTrackingHard.
+void RetainCountChecker::processSummaryOfInlined(const RetainSummary &Summ,
+                                                 const CallEvent &CallOrMsg,
+                                                 CheckerContext &C) const {
+  ProgramStateRef state = C.getState();
+
+  // Evaluate the effect of the arguments.
+  for (unsigned idx = 0, e = CallOrMsg.getNumArgs(); idx != e; ++idx) {
+    if (Summ.getArg(idx) == StopTrackingHard) {
+      SVal V = CallOrMsg.getArgSVal(idx);
+      if (SymbolRef Sym = V.getAsLocSymbol()) {
+        state = removeRefBinding(state, Sym);
+      }
+    }
+  }
+
+  // Evaluate the effect on the message receiver.
+  const ObjCMethodCall *MsgInvocation = dyn_cast<ObjCMethodCall>(&CallOrMsg);
+  if (MsgInvocation) {
+    if (SymbolRef Sym = MsgInvocation->getReceiverSVal().getAsLocSymbol()) {
+      if (Summ.getReceiverEffect() == StopTrackingHard) {
+        state = removeRefBinding(state, Sym);
+      }
+    }
+  }
+
+  // Consult the summary for the return value.
+  RetEffect RE = Summ.getRetEffect();
+  if (RE.getKind() == RetEffect::NoRetHard) {
+    SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
+    if (Sym)
+      state = removeRefBinding(state, Sym);
+  }
+  
+  C.addTransition(state);
+}
+
+void RetainCountChecker::checkSummary(const RetainSummary &Summ,
+                                      const CallEvent &CallOrMsg,
+                                      CheckerContext &C) const {
+  ProgramStateRef state = C.getState();
+
+  // Evaluate the effect of the arguments.
+  RefVal::Kind hasErr = (RefVal::Kind) 0;
+  SourceRange ErrorRange;
+  SymbolRef ErrorSym = 0;
+
+  for (unsigned idx = 0, e = CallOrMsg.getNumArgs(); idx != e; ++idx) {
+    SVal V = CallOrMsg.getArgSVal(idx);
+
+    if (SymbolRef Sym = V.getAsLocSymbol()) {
+      if (const RefVal *T = getRefBinding(state, Sym)) {
+        state = updateSymbol(state, Sym, *T, Summ.getArg(idx), hasErr, C);
+        if (hasErr) {
+          ErrorRange = CallOrMsg.getArgSourceRange(idx);
+          ErrorSym = Sym;
+          break;
+        }
+      }
+    }
+  }
+
+  // Evaluate the effect on the message receiver.
+  bool ReceiverIsTracked = false;
+  if (!hasErr) {
+    const ObjCMethodCall *MsgInvocation = dyn_cast<ObjCMethodCall>(&CallOrMsg);
+    if (MsgInvocation) {
+      if (SymbolRef Sym = MsgInvocation->getReceiverSVal().getAsLocSymbol()) {
+        if (const RefVal *T = getRefBinding(state, Sym)) {
+          ReceiverIsTracked = true;
+          state = updateSymbol(state, Sym, *T, Summ.getReceiverEffect(),
+                                 hasErr, C);
+          if (hasErr) {
+            ErrorRange = MsgInvocation->getOriginExpr()->getReceiverRange();
+            ErrorSym = Sym;
+          }
+        }
+      }
+    }
+  }
+
+  // Process any errors.
+  if (hasErr) {
+    processNonLeakError(state, ErrorRange, hasErr, ErrorSym, C);
+    return;
+  }
+
+  // Consult the summary for the return value.
+  RetEffect RE = Summ.getRetEffect();
+
+  if (RE.getKind() == RetEffect::OwnedWhenTrackedReceiver) {
+    if (ReceiverIsTracked)
+      RE = getSummaryManager(C).getObjAllocRetEffect();      
+    else
+      RE = RetEffect::MakeNoRet();
+  }
+
+  switch (RE.getKind()) {
+    default:
+      llvm_unreachable("Unhandled RetEffect.");
+
+    case RetEffect::NoRet:
+    case RetEffect::NoRetHard:
+      // No work necessary.
+      break;
+
+    case RetEffect::OwnedAllocatedSymbol:
+    case RetEffect::OwnedSymbol: {
+      SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
+      if (!Sym)
+        break;
+
+      // Use the result type from the CallEvent as it automatically adjusts
+      // for methods/functions that return references.
+      QualType ResultTy = CallOrMsg.getResultType();
+      state = setRefBinding(state, Sym, RefVal::makeOwned(RE.getObjKind(),
+                                                          ResultTy));
+
+      // FIXME: Add a flag to the checker where allocations are assumed to
+      // *not* fail.
+      break;
+    }
+
+    case RetEffect::GCNotOwnedSymbol:
+    case RetEffect::ARCNotOwnedSymbol:
+    case RetEffect::NotOwnedSymbol: {
+      const Expr *Ex = CallOrMsg.getOriginExpr();
+      SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
+      if (!Sym)
+        break;
+      assert(Ex);
+      // Use GetReturnType in order to give [NSFoo alloc] the type NSFoo *.
+      QualType ResultTy = GetReturnType(Ex, C.getASTContext());
+      state = setRefBinding(state, Sym, RefVal::makeNotOwned(RE.getObjKind(),
+                                                             ResultTy));
+      break;
+    }
+  }
+
+  // This check is actually necessary; otherwise the statement builder thinks
+  // we've hit a previously-found path.
+  // Normally addTransition takes care of this, but we want the node pointer.
+  ExplodedNode *NewNode;
+  if (state == C.getState()) {
+    NewNode = C.getPredecessor();
+  } else {
+    NewNode = C.addTransition(state);
+  }
+
+  // Annotate the node with summary we used.
+  if (NewNode) {
+    // FIXME: This is ugly. See checkEndAnalysis for why it's necessary.
+    if (ShouldResetSummaryLog) {
+      SummaryLog.clear();
+      ShouldResetSummaryLog = false;
+    }
+    SummaryLog[NewNode] = &Summ;
+  }
+}
+
+
+ProgramStateRef 
+RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
+                                 RefVal V, ArgEffect E, RefVal::Kind &hasErr,
+                                 CheckerContext &C) const {
+  // In GC mode [... release] and [... retain] do nothing.
+  // In ARC mode they shouldn't exist at all, but we just ignore them.
+  bool IgnoreRetainMsg = C.isObjCGCEnabled();
+  if (!IgnoreRetainMsg)
+    IgnoreRetainMsg = (bool)C.getASTContext().getLangOpts().ObjCAutoRefCount;
+
+  switch (E) {
+  default:
+    break;
+  case IncRefMsg:
+    E = IgnoreRetainMsg ? DoNothing : IncRef;
+    break;
+  case DecRefMsg:
+    E = IgnoreRetainMsg ? DoNothing : DecRef;
+    break;
+  case DecRefMsgAndStopTrackingHard:
+    E = IgnoreRetainMsg ? StopTracking : DecRefAndStopTrackingHard;
+    break;
+  case MakeCollectable:
+    E = C.isObjCGCEnabled() ? DecRef : DoNothing;
+    break;
+  }
+
+  // Handle all use-after-releases.
+  if (!C.isObjCGCEnabled() && V.getKind() == RefVal::Released) {
+    V = V ^ RefVal::ErrorUseAfterRelease;
+    hasErr = V.getKind();
+    return setRefBinding(state, sym, V);
+  }
+
+  switch (E) {
+    case DecRefMsg:
+    case IncRefMsg:
+    case MakeCollectable:
+    case DecRefMsgAndStopTrackingHard:
+      llvm_unreachable("DecRefMsg/IncRefMsg/MakeCollectable already converted");
+
+    case Dealloc:
+      // Any use of -dealloc in GC is *bad*.
+      if (C.isObjCGCEnabled()) {
+        V = V ^ RefVal::ErrorDeallocGC;
+        hasErr = V.getKind();
+        break;
+      }
+
+      switch (V.getKind()) {
+        default:
+          llvm_unreachable("Invalid RefVal state for an explicit dealloc.");
+        case RefVal::Owned:
+          // The object immediately transitions to the released state.
+          V = V ^ RefVal::Released;
+          V.clearCounts();
+          return setRefBinding(state, sym, V);
+        case RefVal::NotOwned:
+          V = V ^ RefVal::ErrorDeallocNotOwned;
+          hasErr = V.getKind();
+          break;
+      }
+      break;
+
+    case MayEscape:
+      if (V.getKind() == RefVal::Owned) {
+        V = V ^ RefVal::NotOwned;
+        break;
+      }
+
+      // Fall-through.
+
+    case DoNothing:
+      return state;
+
+    case Autorelease:
+      if (C.isObjCGCEnabled())
+        return state;
+      // Update the autorelease counts.
+      V = V.autorelease();
+      break;
+
+    case StopTracking:
+    case StopTrackingHard:
+      return removeRefBinding(state, sym);
+
+    case IncRef:
+      switch (V.getKind()) {
+        default:
+          llvm_unreachable("Invalid RefVal state for a retain.");
+        case RefVal::Owned:
+        case RefVal::NotOwned:
+          V = V + 1;
+          break;
+        case RefVal::Released:
+          // Non-GC cases are handled above.
+          assert(C.isObjCGCEnabled());
+          V = (V ^ RefVal::Owned) + 1;
+          break;
+      }
+      break;
+
+    case DecRef:
+    case DecRefBridgedTransfered:
+    case DecRefAndStopTrackingHard:
+      switch (V.getKind()) {
+        default:
+          // case 'RefVal::Released' handled above.
+          llvm_unreachable("Invalid RefVal state for a release.");
+
+        case RefVal::Owned:
+          assert(V.getCount() > 0);
+          if (V.getCount() == 1)
+            V = V ^ (E == DecRefBridgedTransfered ? 
+                      RefVal::NotOwned : RefVal::Released);
+          else if (E == DecRefAndStopTrackingHard)
+            return removeRefBinding(state, sym);
+
+          V = V - 1;
+          break;
+
+        case RefVal::NotOwned:
+          if (V.getCount() > 0) {
+            if (E == DecRefAndStopTrackingHard)
+              return removeRefBinding(state, sym);
+            V = V - 1;
+          } else {
+            V = V ^ RefVal::ErrorReleaseNotOwned;
+            hasErr = V.getKind();
+          }
+          break;
+
+        case RefVal::Released:
+          // Non-GC cases are handled above.
+          assert(C.isObjCGCEnabled());
+          V = V ^ RefVal::ErrorUseAfterRelease;
+          hasErr = V.getKind();
+          break;
+      }
+      break;
+  }
+  return setRefBinding(state, sym, V);
+}
+
+void RetainCountChecker::processNonLeakError(ProgramStateRef St,
+                                             SourceRange ErrorRange,
+                                             RefVal::Kind ErrorKind,
+                                             SymbolRef Sym,
+                                             CheckerContext &C) const {
+  ExplodedNode *N = C.generateSink(St);
+  if (!N)
+    return;
+
+  CFRefBug *BT;
+  switch (ErrorKind) {
+    default:
+      llvm_unreachable("Unhandled error.");
+    case RefVal::ErrorUseAfterRelease:
+      if (!useAfterRelease)
+        useAfterRelease.reset(new UseAfterRelease());
+      BT = &*useAfterRelease;
+      break;
+    case RefVal::ErrorReleaseNotOwned:
+      if (!releaseNotOwned)
+        releaseNotOwned.reset(new BadRelease());
+      BT = &*releaseNotOwned;
+      break;
+    case RefVal::ErrorDeallocGC:
+      if (!deallocGC)
+        deallocGC.reset(new DeallocGC());
+      BT = &*deallocGC;
+      break;
+    case RefVal::ErrorDeallocNotOwned:
+      if (!deallocNotOwned)
+        deallocNotOwned.reset(new DeallocNotOwned());
+      BT = &*deallocNotOwned;
+      break;
+  }
+
+  assert(BT);
+  CFRefReport *report = new CFRefReport(*BT, C.getASTContext().getLangOpts(),
+                                        C.isObjCGCEnabled(), SummaryLog,
+                                        N, Sym);
+  report->addRange(ErrorRange);
+  C.emitReport(report);
+}
+
+//===----------------------------------------------------------------------===//
+// Handle the return values of retain-count-related functions.
+//===----------------------------------------------------------------------===//
+
+bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+  // Get the callee. We're only interested in simple C functions.
+  ProgramStateRef state = C.getState();
+  const FunctionDecl *FD = C.getCalleeDecl(CE);
+  if (!FD)
+    return false;
+
+  IdentifierInfo *II = FD->getIdentifier();
+  if (!II)
+    return false;
+
+  // For now, we're only handling the functions that return aliases of their
+  // arguments: CFRetain and CFMakeCollectable (and their families).
+  // Eventually we should add other functions we can model entirely,
+  // such as CFRelease, which don't invalidate their arguments or globals.
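+  // e.g., CFRetain(ref) is modeled so that its return value aliases 'ref'
+  // itself rather than being a freshly conjured, unrelated symbol.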
+  if (CE->getNumArgs() != 1)
+    return false;
+
+  // Get the name of the function.
+  StringRef FName = II->getName();
+  FName = FName.substr(FName.find_first_not_of('_'));
+
+  // See if it's one of the specific functions we know how to eval.
+  bool canEval = false;
+
+  QualType ResultTy = CE->getCallReturnType();
+  if (ResultTy->isObjCIdType()) {
+    // Handle: id NSMakeCollectable(CFTypeRef)
+    canEval = II->isStr("NSMakeCollectable");
+  } else if (ResultTy->isPointerType()) {
+    // Handle: (CF|CG)Retain
+    //         CFMakeCollectable
+    // It's okay to be a little sloppy here (CGMakeCollectable doesn't exist).
+    if (cocoa::isRefType(ResultTy, "CF", FName) ||
+        cocoa::isRefType(ResultTy, "CG", FName)) {
+      canEval = isRetain(FD, FName) || isMakeCollectable(FD, FName);
+    }
+  }
+        
+  if (!canEval)
+    return false;
+
+  // Bind the return value.
+  const LocationContext *LCtx = C.getLocationContext();
+  SVal RetVal = state->getSVal(CE->getArg(0), LCtx);
+  if (RetVal.isUnknown()) {
+    // If the argument is unknown, conjure a return value.
+    SValBuilder &SVB = C.getSValBuilder();
+    RetVal = SVB.conjureSymbolVal(0, CE, LCtx, ResultTy, C.blockCount());
+  }
+  state = state->BindExpr(CE, LCtx, RetVal, false);
+
+  // FIXME: This should not be necessary, but otherwise the argument seems to be
+  // considered alive during the next statement.
+  if (const MemRegion *ArgRegion = RetVal.getAsRegion()) {
+    // Save the refcount status of the argument.
+    SymbolRef Sym = RetVal.getAsLocSymbol();
+    const RefVal *Binding = 0;
+    if (Sym)
+      Binding = getRefBinding(state, Sym);
+
+    // Invalidate the argument region.
+    state = state->invalidateRegions(ArgRegion, CE, C.blockCount(), LCtx,
+                                     /*CausesPointerEscape*/ false);
+
+    // Restore the refcount status of the argument.
+    if (Binding)
+      state = setRefBinding(state, Sym, *Binding);
+  }
+
+  C.addTransition(state);
+  return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Handle return statements.
+//===----------------------------------------------------------------------===//
+
+void RetainCountChecker::checkPreStmt(const ReturnStmt *S,
+                                      CheckerContext &C) const {
+
+  // Only adjust the reference count if this is the top-level call frame,
+  // and not the result of inlining.  In the future, we should do
+  // better checking even for inlined calls, and see if they match
+  // with their expected semantics (e.g., the method should return a retained
+  // object, etc.).
+  if (!C.inTopFrame())
+    return;
+
+  const Expr *RetE = S->getRetValue();
+  if (!RetE)
+    return;
+
+  ProgramStateRef state = C.getState();
+  SymbolRef Sym =
+    state->getSValAsScalarOrLoc(RetE, C.getLocationContext()).getAsLocSymbol();
+  if (!Sym)
+    return;
+
+  // Get the reference count binding (if any).
+  const RefVal *T = getRefBinding(state, Sym);
+  if (!T)
+    return;
+
+  // Change the reference count.
+  RefVal X = *T;
+
+  switch (X.getKind()) {
+    case RefVal::Owned: {
+      unsigned cnt = X.getCount();
+      assert(cnt > 0);
+      X.setCount(cnt - 1);
+      X = X ^ RefVal::ReturnedOwned;
+      break;
+    }
+
+    case RefVal::NotOwned: {
+      unsigned cnt = X.getCount();
+      if (cnt) {
+        X.setCount(cnt - 1);
+        X = X ^ RefVal::ReturnedOwned;
+      }
+      else {
+        X = X ^ RefVal::ReturnedNotOwned;
+      }
+      break;
+    }
+
+    default:
+      return;
+  }
+
+  // Update the binding.
+  state = setRefBinding(state, Sym, X);
+  ExplodedNode *Pred = C.addTransition(state);
+
+  // At this point we have updated the state properly.
+  // Everything after this is merely checking to see if the return value has
+  // been over- or under-retained.
+
+  // Did we cache out?
+  if (!Pred)
+    return;
+
+  // Update the autorelease counts.
+  static SimpleProgramPointTag
+         AutoreleaseTag("RetainCountChecker : Autorelease");
+  state = handleAutoreleaseCounts(state, Pred, &AutoreleaseTag, C, Sym, X);
+
+  // Did we cache out?
+  if (!state)
+    return;
+
+  // Get the updated binding.
+  T = getRefBinding(state, Sym);
+  assert(T);
+  X = *T;
+
+  // Consult the summary of the enclosing method.
+  RetainSummaryManager &Summaries = getSummaryManager(C);
+  const Decl *CD = &Pred->getCodeDecl();
+  RetEffect RE = RetEffect::MakeNoRet();
+
+  // FIXME: What is the convention for blocks? Is there one?
+  if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CD)) {
+    const RetainSummary *Summ = Summaries.getMethodSummary(MD);
+    RE = Summ->getRetEffect();
+  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CD)) {
+    if (!isa<CXXMethodDecl>(FD)) {
+      const RetainSummary *Summ = Summaries.getFunctionSummary(FD);
+      RE = Summ->getRetEffect();
+    }
+  }
+
+  checkReturnWithRetEffect(S, C, Pred, RE, X, Sym, state);
+}
+
+void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
+                                                  CheckerContext &C,
+                                                  ExplodedNode *Pred,
+                                                  RetEffect RE, RefVal X,
+                                                  SymbolRef Sym,
+                                              ProgramStateRef state) const {
+  // Any leaks or other errors?
+  if (X.isReturnedOwned() && X.getCount() == 0) {
+    if (RE.getKind() != RetEffect::NoRet) {
+      bool hasError = false;
+      if (C.isObjCGCEnabled() && RE.getObjKind() == RetEffect::ObjC) {
+        // Things are more complicated with garbage collection.  If the
+        // returned object is supposed to be an Objective-C object, we have
+        // a leak (as the caller expects a GC'ed object) because no
+        // method should return ownership unless it returns a CF object.
+        hasError = true;
+        X = X ^ RefVal::ErrorGCLeakReturned;
+      }
+      else if (!RE.isOwned()) {
+        // Either we are using GC and the returned object is a CF type
+        // or we aren't using GC.  In either case, the enclosing method is
+        // expected to return ownership.
+        hasError = true;
+        X = X ^ RefVal::ErrorLeakReturned;
+      }
+
+      if (hasError) {
+        // Generate an error node.
+        state = setRefBinding(state, Sym, X);
+
+        static SimpleProgramPointTag
+               ReturnOwnLeakTag("RetainCountChecker : ReturnsOwnLeak");
+        ExplodedNode *N = C.addTransition(state, Pred, &ReturnOwnLeakTag);
+        if (N) {
+          const LangOptions &LOpts = C.getASTContext().getLangOpts();
+          bool GCEnabled = C.isObjCGCEnabled();
+          CFRefReport *report =
+            new CFRefLeakReport(*getLeakAtReturnBug(LOpts, GCEnabled),
+                                LOpts, GCEnabled, SummaryLog,
+                                N, Sym, C, IncludeAllocationLine);
+
+          C.emitReport(report);
+        }
+      }
+    }
+  } else if (X.isReturnedNotOwned()) {
+    if (RE.isOwned()) {
+      // Trying to return a not owned object to a caller expecting an
+      // owned object.
+      state = setRefBinding(state, Sym, X ^ RefVal::ErrorReturnedNotOwned);
+
+      static SimpleProgramPointTag
+             ReturnNotOwnedTag("RetainCountChecker : ReturnNotOwnedForOwned");
+      ExplodedNode *N = C.addTransition(state, Pred, &ReturnNotOwnedTag);
+      if (N) {
+        if (!returnNotOwnedForOwned)
+          returnNotOwnedForOwned.reset(new ReturnedNotOwnedForOwned());
+
+        CFRefReport *report =
+            new CFRefReport(*returnNotOwnedForOwned,
+                            C.getASTContext().getLangOpts(), 
+                            C.isObjCGCEnabled(), SummaryLog, N, Sym);
+        C.emitReport(report);
+      }
+    }
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Check various ways a symbol can be invalidated.
+//===----------------------------------------------------------------------===//
+
+void RetainCountChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+                                   CheckerContext &C) const {
+  // Are we storing to something that causes the value to "escape"?
+  bool escapes = true;
+
+  // A value escapes in three possible cases (this may change):
+  //
+  // (1) we are binding to something that is not a memory region.
+  // (2) we are binding to a memregion that does not have stack storage
+  // (3) we are binding to a memregion with stack storage that the store
+  //     does not understand.
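+  //
+  // For example, storing a tracked object into a global variable or into a
+  // heap-allocated structure makes it escape, and we stop tracking it below.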
+  ProgramStateRef state = C.getState();
+
+  if (Optional<loc::MemRegionVal> regionLoc = loc.getAs<loc::MemRegionVal>()) {
+    escapes = !regionLoc->getRegion()->hasStackStorage();
+
+    if (!escapes) {
+      // To test (3), generate a new state with the binding added.  If it is
+      // the same state, then it escapes (since the store cannot represent
+      // the binding).
+      // Do this only if we know that the store is not supposed to generate the
+      // same state.
+      SVal StoredVal = state->getSVal(regionLoc->getRegion());
+      if (StoredVal != val)
+        escapes = (state == (state->bindLoc(*regionLoc, val)));
+    }
+    if (!escapes) {
+      // Additionally, we do not currently model what happens when a symbol is
+      // assigned to a struct field, so be conservative here and let the symbol
+      // go. TODO: This could definitely be improved upon.
+      escapes = !isa<VarRegion>(regionLoc->getRegion());
+    }
+  }
+
+  // If the store can represent the binding and we are storing to something
+  // that has local storage, just return and let the simulation state
+  // continue as is.
+  if (!escapes)
+      return;
+
+  // Otherwise, find all symbols referenced by 'val' that we are tracking
+  // and stop tracking them.
+  state = state->scanReachableSymbols<StopTrackingCallback>(val).getState();
+  C.addTransition(state);
+}
+
+ProgramStateRef RetainCountChecker::evalAssume(ProgramStateRef state,
+                                                   SVal Cond,
+                                                   bool Assumption) const {
+
+  // FIXME: We may add to the interface of evalAssume the list of symbols
+  //  whose assumptions have changed.  For now we just iterate through the
+  //  bindings and check if any of the tracked symbols are NULL.  This isn't
+  //  too bad since the number of symbols we will track in practice are
+  //  probably small and evalAssume is only called at branches and a few
+  //  other places.
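+  //
+  //  For example, on the null branch of 'if (!p)', the symbol bound to 'p' is
+  //  constrained to null; we then assume the allocation failed and drop its
+  //  binding below so that no leak is reported on that path.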
+  RefBindingsTy B = state->get<RefBindings>();
+
+  if (B.isEmpty())
+    return state;
+
+  bool changed = false;
+  RefBindingsTy::Factory &RefBFactory = state->get_context<RefBindings>();
+
+  for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+    // If the symbol is constrained to null, stop tracking it.
+    ConstraintManager &CMgr = state->getConstraintManager();
+    ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey());
+    if (AllocFailed.isConstrainedTrue()) {
+      changed = true;
+      B = RefBFactory.remove(B, I.getKey());
+    }
+  }
+
+  if (changed)
+    state = state->set<RefBindings>(B);
+
+  return state;
+}
+
+ProgramStateRef 
+RetainCountChecker::checkRegionChanges(ProgramStateRef state,
+                                    const InvalidatedSymbols *invalidated,
+                                    ArrayRef<const MemRegion *> ExplicitRegions,
+                                    ArrayRef<const MemRegion *> Regions,
+                                    const CallEvent *Call) const {
+  if (!invalidated)
+    return state;
+
+  llvm::SmallPtrSet<SymbolRef, 8> WhitelistedSymbols;
+  for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
+       E = ExplicitRegions.end(); I != E; ++I) {
+    if (const SymbolicRegion *SR = (*I)->StripCasts()->getAs<SymbolicRegion>())
+      WhitelistedSymbols.insert(SR->getSymbol());
+  }
+
+  for (InvalidatedSymbols::const_iterator I=invalidated->begin(),
+       E = invalidated->end(); I!=E; ++I) {
+    SymbolRef sym = *I;
+    if (WhitelistedSymbols.count(sym))
+      continue;
+    // Remove any existing reference-count binding.
+    state = removeRefBinding(state, sym);
+  }
+  return state;
+}
+
+//===----------------------------------------------------------------------===//
+// Handle dead symbols and end-of-path.
+//===----------------------------------------------------------------------===//
+
+ProgramStateRef
+RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
+                                            ExplodedNode *Pred,
+                                            const ProgramPointTag *Tag,
+                                            CheckerContext &Ctx,
+                                            SymbolRef Sym, RefVal V) const {
+  unsigned ACnt = V.getAutoreleaseCount();
+
+  // No autorelease counts?  Nothing to be done.
+  if (!ACnt)
+    return state;
+
+  assert(!Ctx.isObjCGCEnabled() && "Autorelease counts in GC mode?");
+  unsigned Cnt = V.getCount();
+
+  // FIXME: Handle sending 'autorelease' to already released object.
+
+  if (V.getKind() == RefVal::ReturnedOwned)
+    ++Cnt;
+
+  if (ACnt <= Cnt) {
+    if (ACnt == Cnt) {
+      V.clearCounts();
+      if (V.getKind() == RefVal::ReturnedOwned)
+        V = V ^ RefVal::ReturnedNotOwned;
+      else
+        V = V ^ RefVal::NotOwned;
+    } else {
+      V.setCount(V.getCount() - ACnt);
+      V.setAutoreleaseCount(0);
+    }
+    return setRefBinding(state, Sym, V);
+  }
+
+  // Woah!  More autorelease counts than retain counts left.
+  // Emit hard error.
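+  // (For example, an object at +1 that has been sent -autorelease twice will
+  // eventually be released more times than it was retained.)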
+  V = V ^ RefVal::ErrorOverAutorelease;
+  state = setRefBinding(state, Sym, V);
+
+  ExplodedNode *N = Ctx.generateSink(state, Pred, Tag);
+  if (N) {
+    SmallString<128> sbuf;
+    llvm::raw_svector_ostream os(sbuf);
+    os << "Object was autoreleased ";
+    if (V.getAutoreleaseCount() > 1)
+      os << V.getAutoreleaseCount() << " times but the object ";
+    else
+      os << "but ";
+    os << "has a +" << V.getCount() << " retain count";
+
+    if (!overAutorelease)
+      overAutorelease.reset(new OverAutorelease());
+
+    const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
+    CFRefReport *report =
+      new CFRefReport(*overAutorelease, LOpts, /* GCEnabled = */ false,
+                      SummaryLog, N, Sym, os.str());
+    Ctx.emitReport(report);
+  }
+
+  return 0;
+}
+
+ProgramStateRef 
+RetainCountChecker::handleSymbolDeath(ProgramStateRef state,
+                                      SymbolRef sid, RefVal V,
+                                    SmallVectorImpl<SymbolRef> &Leaked) const {
+  bool hasLeak = false;
+  if (V.isOwned())
+    hasLeak = true;
+  else if (V.isNotOwned() || V.isReturnedOwned())
+    hasLeak = (V.getCount() > 0);
+
+  if (!hasLeak)
+    return removeRefBinding(state, sid);
+
+  Leaked.push_back(sid);
+  return setRefBinding(state, sid, V ^ RefVal::ErrorLeak);
+}
+
+ExplodedNode *
+RetainCountChecker::processLeaks(ProgramStateRef state,
+                                 SmallVectorImpl<SymbolRef> &Leaked,
+                                 CheckerContext &Ctx,
+                                 ExplodedNode *Pred) const {
+  // Generate an intermediate node representing the leak point.
+  ExplodedNode *N = Ctx.addTransition(state, Pred);
+
+  if (N) {
+    for (SmallVectorImpl<SymbolRef>::iterator
+         I = Leaked.begin(), E = Leaked.end(); I != E; ++I) {
+
+      const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
+      bool GCEnabled = Ctx.isObjCGCEnabled();
+      CFRefBug *BT = Pred ? getLeakWithinFunctionBug(LOpts, GCEnabled)
+                          : getLeakAtReturnBug(LOpts, GCEnabled);
+      assert(BT && "BugType not initialized.");
+
+      CFRefLeakReport *report = new CFRefLeakReport(*BT, LOpts, GCEnabled, 
+                                                    SummaryLog, N, *I, Ctx,
+                                                    IncludeAllocationLine);
+      Ctx.emitReport(report);
+    }
+  }
+
+  return N;
+}
+
+void RetainCountChecker::checkEndFunction(CheckerContext &Ctx) const {
+  ProgramStateRef state = Ctx.getState();
+  RefBindingsTy B = state->get<RefBindings>();
+  ExplodedNode *Pred = Ctx.getPredecessor();
+
+  for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+    state = handleAutoreleaseCounts(state, Pred, /*Tag=*/0, Ctx,
+                                    I->first, I->second);
+    if (!state)
+      return;
+  }
+
+  // If the current LocationContext has a parent, don't check for leaks.
+  // We will do that later.
+  // FIXME: we should instead check for imbalances of the retain/releases,
+  // and suggest annotations.
+  if (Ctx.getLocationContext()->getParent())
+    return;
+  
+  B = state->get<RefBindings>();
+  SmallVector<SymbolRef, 10> Leaked;
+
+  for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I)
+    state = handleSymbolDeath(state, I->first, I->second, Leaked);
+
+  processLeaks(state, Leaked, Ctx, Pred);
+}
+
+const ProgramPointTag *
+RetainCountChecker::getDeadSymbolTag(SymbolRef sym) const {
+  const SimpleProgramPointTag *&tag = DeadSymbolTags[sym];
+  if (!tag) {
+    SmallString<64> buf;
+    llvm::raw_svector_ostream out(buf);
+    out << "RetainCountChecker : Dead Symbol : ";
+    sym->dumpToStream(out);
+    tag = new SimpleProgramPointTag(out.str());
+  }
+  return tag;  
+}
+
+void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+                                          CheckerContext &C) const {
+  ExplodedNode *Pred = C.getPredecessor();
+
+  ProgramStateRef state = C.getState();
+  RefBindingsTy B = state->get<RefBindings>();
+  SmallVector<SymbolRef, 10> Leaked;
+
+  // Update counts from autorelease pools
+  for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+       E = SymReaper.dead_end(); I != E; ++I) {
+    SymbolRef Sym = *I;
+    if (const RefVal *T = B.lookup(Sym)){
+      // Use the symbol as the tag.
+      // FIXME: This might not be as unique as we would like.
+      const ProgramPointTag *Tag = getDeadSymbolTag(Sym);
+      state = handleAutoreleaseCounts(state, Pred, Tag, C, Sym, *T);
+      if (!state)
+        return;
+
+      // Fetch the new reference count from the state, and use it to handle
+      // this symbol.
+      state = handleSymbolDeath(state, *I, *getRefBinding(state, Sym), Leaked);
+    }
+  }
+
+  if (Leaked.empty()) {
+    C.addTransition(state);
+    return;
+  }
+
+  Pred = processLeaks(state, Leaked, C, Pred);
+
+  // Did we cache out?
+  if (!Pred)
+    return;
+
+  // Now generate a new node that nukes the old bindings.
+  // The only bindings left at this point are the leaked symbols.
+  RefBindingsTy::Factory &F = state->get_context<RefBindings>();
+  B = state->get<RefBindings>();
+
+  for (SmallVectorImpl<SymbolRef>::iterator I = Leaked.begin(),
+                                            E = Leaked.end();
+       I != E; ++I)
+    B = F.remove(B, *I);
+
+  state = state->set<RefBindings>(B);
+  C.addTransition(state, Pred);
+}
+
+void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
+                                    const char *NL, const char *Sep) const {
+
+  RefBindingsTy B = State->get<RefBindings>();
+
+  if (B.isEmpty())
+    return;
+
+  Out << Sep << NL;
+
+  for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+    Out << I->first << " : ";
+    I->second.print(Out);
+    Out << NL;
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Checker registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerRetainCountChecker(CheckerManager &Mgr) {
+  Mgr.registerChecker<RetainCountChecker>(Mgr.getAnalyzerOptions());
+}
+
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
new file mode 100644
index 0000000..fe253b7
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -0,0 +1,91 @@
+//== ReturnPointerRangeChecker.cpp ------------------------------*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ReturnPointerRangeChecker, a path-sensitive check that
+// looks for an out-of-bounds pointer being returned to callers.
+//
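+// For example (illustrative only):
+//
+//   static int A[10];
+//   int *get() { return &A[11]; }  // warn: returned pointer is out of bounds
+//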
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ReturnPointerRangeChecker : 
+    public Checker< check::PreStmt<ReturnStmt> > {
+  mutable OwningPtr<BuiltinBug> BT;
+public:
+    void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
+};
+}
+
+void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
+                                             CheckerContext &C) const {
+  ProgramStateRef state = C.getState();
+
+  const Expr *RetE = RS->getRetValue();
+  if (!RetE)
+    return;
+ 
+  SVal V = state->getSVal(RetE, C.getLocationContext());
+  const MemRegion *R = V.getAsRegion();
+
+  const ElementRegion *ER = dyn_cast_or_null<ElementRegion>(R);
+  if (!ER)
+    return;
+
+  DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
+  // Zero index is always in bounds; this also passes ElementRegions created
+  // for pointer casts.
+  if (Idx.isZeroConstant())
+    return;
+  // FIXME: All of this out-of-bounds checking should eventually be refactored
+  // into a common place.
+
+  DefinedOrUnknownSVal NumElements
+    = C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
+                                           ER->getValueType());
+
+  ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
+  ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
+  if (StOutBound && !StInBound) {
+    ExplodedNode *N = C.generateSink(StOutBound);
+
+    if (!N)
+      return;
+  
+    // FIXME: This bug corresponds to CWE-466.  Eventually we should have bug
+    // types explicitly reference such exploit categories (when applicable).
+    if (!BT)
+      BT.reset(new BuiltinBug("Return of pointer value outside of expected range",
+           "Returned pointer value points outside the original object "
+           "(potential buffer overflow)"));
+
+    // FIXME: It would be nice to eventually make this diagnostic more clear,
+    // e.g., by referencing the original declaration or by saying *why* this
+    // reference is outside the range.
+
+    // Generate a report for this bug.
+    BugReport *report = 
+      new BugReport(*BT, BT->getDescription(), N);
+
+    report->addRange(RetE->getSourceRange());
+    C.emitReport(report);
+  }
+}
+
+void ento::registerReturnPointerRangeChecker(CheckerManager &mgr) {
+  mgr.registerChecker<ReturnPointerRangeChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
new file mode 100644
index 0000000..ed96c40
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -0,0 +1,123 @@
+//== ReturnUndefChecker.cpp -------------------------------------*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ReturnUndefChecker, which is a path-sensitive
+// check which looks for undefined or garbage values being returned to the
+// caller.
+//
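+// For example (illustrative only):
+//
+//   int f() {
+//     int x;
+//     return x;  // warn: undefined or garbage value returned to caller
+//   }
+//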
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ReturnUndefChecker : public Checker< check::PreStmt<ReturnStmt> > {
+  mutable OwningPtr<BuiltinBug> BT_Undef;
+  mutable OwningPtr<BuiltinBug> BT_NullReference;
+
+  void emitUndef(CheckerContext &C, const Expr *RetE) const;
+  void checkReference(CheckerContext &C, const Expr *RetE,
+                      DefinedOrUnknownSVal RetVal) const;
+public:
+  void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
+};
+}
+
+void ReturnUndefChecker::checkPreStmt(const ReturnStmt *RS,
+                                      CheckerContext &C) const {
+  const Expr *RetE = RS->getRetValue();
+  if (!RetE)
+    return;
+  SVal RetVal = C.getSVal(RetE);
+
+  const StackFrameContext *SFC = C.getStackFrame();
+  QualType RT = CallEvent::getDeclaredResultType(SFC->getDecl());
+
+  if (RetVal.isUndef()) {
+    // "return;" is modeled to evaluate to an UndefinedVal. Allow UndefinedVal
+    // to be returned in functions returning void to support this pattern:
+    //   void foo() {
+    //     return;
+    //   }
+    //   void test() {
+    //     return foo();
+    //   }
+    if (!RT.isNull() && RT->isVoidType())
+      return;
+
+    // Not all blocks have explicitly-specified return types; if the return type
+    // is not available, but the return value expression has 'void' type, assume
+    // Sema already checked it.
+    if (RT.isNull() && isa<BlockDecl>(SFC->getDecl()) &&
+        RetE->getType()->isVoidType())
+      return;
+
+    emitUndef(C, RetE);
+    return;
+  }
+
+  if (RT.isNull())
+    return;
+
+  if (RT->isReferenceType()) {
+    checkReference(C, RetE, RetVal.castAs<DefinedOrUnknownSVal>());
+    return;
+  }
+}
+
+static void emitBug(CheckerContext &C, BuiltinBug &BT, const Expr *RetE,
+                    const Expr *TrackingE = 0) {
+  ExplodedNode *N = C.generateSink();
+  if (!N)
+    return;
+
+  BugReport *Report = new BugReport(BT, BT.getDescription(), N);
+
+  Report->addRange(RetE->getSourceRange());
+  bugreporter::trackNullOrUndefValue(N, TrackingE ? TrackingE : RetE, *Report);
+
+  C.emitReport(Report);
+}
+
+void ReturnUndefChecker::emitUndef(CheckerContext &C, const Expr *RetE) const {
+  if (!BT_Undef)
+    BT_Undef.reset(new BuiltinBug("Garbage return value",
+                                  "Undefined or garbage value "
+                                    "returned to caller"));
+  emitBug(C, *BT_Undef, RetE);
+}
+
+void ReturnUndefChecker::checkReference(CheckerContext &C, const Expr *RetE,
+                                        DefinedOrUnknownSVal RetVal) const {
+  ProgramStateRef StNonNull, StNull;
+  llvm::tie(StNonNull, StNull) = C.getState()->assume(RetVal);
+
+  if (StNonNull) {
+    // Going forward, assume the location is non-null.
+    C.addTransition(StNonNull);
+    return;
+  }
+
+  // The return value is known to be null. Emit a bug report.
+  if (!BT_NullReference)
+    BT_NullReference.reset(new BuiltinBug("Returning null reference"));
+
+  emitBug(C, *BT_NullReference, RetE, bugreporter::getDerefExpr(RetE));
+}
+
+void ento::registerReturnUndefChecker(CheckerManager &mgr) {
+  mgr.registerChecker<ReturnUndefChecker>();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
new file mode 100644
index 0000000..1ccf339
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
@@ -0,0 +1,289 @@
+//===-- SimpleStreamChecker.cpp ---------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a checker for proper use of fopen/fclose APIs.
+//   - If a file has been closed with fclose, it should not be accessed again.
+//   Accessing a closed file results in undefined behavior.
+//   - If a file was opened with fopen, it must be closed with fclose before
+//   the execution ends. Failing to do so results in a resource leak.
+//
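+// For example (illustrative only):
+//
+//   FILE *F = fopen("myfile.txt", "w");
+//   fclose(F);
+//   fclose(F);   // warn: closing an already closed file stream
+//
+//   FILE *G = fopen("myfile.txt", "w");
+//   if (G)
+//     return;    // warn: opened stream is never closed (resource leak)
+//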
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+typedef SmallVector<SymbolRef, 2> SymbolVector;
+
+struct StreamState {
+private:
+  enum Kind { Opened, Closed } K;
+  StreamState(Kind InK) : K(InK) { }
+
+public:
+  bool isOpened() const { return K == Opened; }
+  bool isClosed() const { return K == Closed; }
+
+  static StreamState getOpened() { return StreamState(Opened); }
+  static StreamState getClosed() { return StreamState(Closed); }
+
+  bool operator==(const StreamState &X) const {
+    return K == X.K;
+  }
+  void Profile(llvm::FoldingSetNodeID &ID) const {
+    ID.AddInteger(K);
+  }
+};
+
+class SimpleStreamChecker : public Checker<check::PostCall,
+                                           check::PreCall,
+                                           check::DeadSymbols,
+                                           check::PointerEscape> {
+
+  mutable IdentifierInfo *IIfopen, *IIfclose;
+
+  OwningPtr<BugType> DoubleCloseBugType;
+  OwningPtr<BugType> LeakBugType;
+
+  void initIdentifierInfo(ASTContext &Ctx) const;
+
+  void reportDoubleClose(SymbolRef FileDescSym,
+                         const CallEvent &Call,
+                         CheckerContext &C) const;
+
+  void reportLeaks(SymbolVector LeakedStreams,
+                   CheckerContext &C,
+                   ExplodedNode *ErrNode) const;
+
+  bool guaranteedNotToCloseFile(const CallEvent &Call) const;
+
+public:
+  SimpleStreamChecker();
+
+  /// Process fopen.
+  void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+  /// Process fclose.
+  void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+
+  void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+
+  /// Stop tracking addresses which escape.
+  ProgramStateRef checkPointerEscape(ProgramStateRef State,
+                                    const InvalidatedSymbols &Escaped,
+                                    const CallEvent *Call,
+                                    PointerEscapeKind Kind) const;
+};
+
+} // end anonymous namespace
+
+/// The state of the checker is a map from tracked stream symbols to their
+/// state. Let's store it in the ProgramState.
+REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
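+// The macro above registers the 'StreamMap' program-state trait (a map keyed
+// by SymbolRef) that the callbacks below read and update through
+// State->get<StreamMap>(), State->set<StreamMap>(...), and
+// State->remove<StreamMap>(...).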
+
+namespace {
+class StopTrackingCallback : public SymbolVisitor {
+  ProgramStateRef state;
+public:
+  StopTrackingCallback(ProgramStateRef st) : state(st) {}
+  ProgramStateRef getState() const { return state; }
+
+  bool VisitSymbol(SymbolRef sym) {
+    state = state->remove<StreamMap>(sym);
+    return true;
+  }
+};
+} // end anonymous namespace
+
+
+SimpleStreamChecker::SimpleStreamChecker() : IIfopen(0), IIfclose(0) {
+  // Initialize the bug types.
+  DoubleCloseBugType.reset(new BugType("Double fclose",
+                                       "Unix Stream API Error"));
+
+  LeakBugType.reset(new BugType("Resource Leak",
+                                "Unix Stream API Error"));
+  // Suppress the leak report if the path ends in a sink node (e.g. another,
+  // higher-importance bug, or a call to assert() or exit(0)).
+  LeakBugType->setSuppressOnSink(true);
+}
+
+void SimpleStreamChecker::checkPostCall(const CallEvent &Call,
+                                        CheckerContext &C) const {
+  initIdentifierInfo(C.getASTContext());
+
+  if (!Call.isGlobalCFunction())
+    return;
+
+  if (Call.getCalleeIdentifier() != IIfopen)
+    return;
+
+  // Get the symbolic value corresponding to the file handle.
+  SymbolRef FileDesc = Call.getReturnValue().getAsSymbol();
+  if (!FileDesc)
+    return;
+
+  // Generate the next transition (an edge in the exploded graph).
+  ProgramStateRef State = C.getState();
+  State = State->set<StreamMap>(FileDesc, StreamState::getOpened());
+  C.addTransition(State);
+}
+
+void SimpleStreamChecker::checkPreCall(const CallEvent &Call,
+                                       CheckerContext &C) const {
+  initIdentifierInfo(C.getASTContext());
+
+  if (!Call.isGlobalCFunction())
+    return;
+
+  if (Call.getCalleeIdentifier() != IIfclose)
+    return;
+
+  if (Call.getNumArgs() != 1)
+    return;
+
+  // Get the symbolic value corresponding to the file handle.
+  SymbolRef FileDesc = Call.getArgSVal(0).getAsSymbol();
+  if (!FileDesc)
+    return;
+
+  // Check if the stream has already been closed.
+  ProgramStateRef State = C.getState();
+  const StreamState *SS = State->get<StreamMap>(FileDesc);
+  if (SS && SS->isClosed()) {
+    reportDoubleClose(FileDesc, Call, C);
+    return;
+  }
+
+  // Generate the next transition, in which the stream is closed.
+  State = State->set<StreamMap>(FileDesc, StreamState::getClosed());
+  C.addTransition(State);
+}
+
+static bool isLeaked(SymbolRef Sym, const StreamState &SS,
+                     bool IsSymDead, ProgramStateRef State) {
+  if (IsSymDead && SS.isOpened()) {
+    // If a symbol is NULL, assume that fopen failed on this path.
+    // A symbol should only be considered leaked if it is non-null.
+    ConstraintManager &CMgr = State->getConstraintManager();
+    ConditionTruthVal OpenFailed = CMgr.isNull(State, Sym);
+    return !OpenFailed.isConstrainedTrue();
+  }
+  return false;
+}
+
+void SimpleStreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+                                           CheckerContext &C) const {
+  ProgramStateRef State = C.getState();
+  SymbolVector LeakedStreams;
+  StreamMapTy TrackedStreams = State->get<StreamMap>();
+  for (StreamMapTy::iterator I = TrackedStreams.begin(),
+                             E = TrackedStreams.end(); I != E; ++I) {
+    SymbolRef Sym = I->first;
+    bool IsSymDead = SymReaper.isDead(Sym);
+
+    // Collect leaked symbols.
+    if (isLeaked(Sym, I->second, IsSymDead, State))
+      LeakedStreams.push_back(Sym);
+
+    // Remove the dead symbol from the streams map.
+    if (IsSymDead)
+      State = State->remove<StreamMap>(Sym);
+  }
+
+  ExplodedNode *N = C.addTransition(State);
+  reportLeaks(LeakedStreams, C, N);
+}
+
+void SimpleStreamChecker::reportDoubleClose(SymbolRef FileDescSym,
+                                            const CallEvent &Call,
+                                            CheckerContext &C) const {
+  // We reached a bug; stop exploring the path here by generating a sink.
+  ExplodedNode *ErrNode = C.generateSink();
+  // If we've already reached this node on another path, return.
+  if (!ErrNode)
+    return;
+
+  // Generate the report.
+  BugReport *R = new BugReport(*DoubleCloseBugType,
+      "Closing a previously closed file stream", ErrNode);
+  R->addRange(Call.getSourceRange());
+  R->markInteresting(FileDescSym);
+  C.emitReport(R);
+}
+
+void SimpleStreamChecker::reportLeaks(SymbolVector LeakedStreams,
+                                               CheckerContext &C,
+                                               ExplodedNode *ErrNode) const {
+  // Attach bug reports to the leak node.
+  // TODO: Identify the leaked file descriptor.
+  for (SmallVector<SymbolRef, 2>::iterator
+      I = LeakedStreams.begin(), E = LeakedStreams.end(); I != E; ++I) {
+    BugReport *R = new BugReport(*LeakBugType,
+        "Opened file is never closed; potential resource leak", ErrNode);
+    R->markInteresting(*I);
+    C.emitReport(R);
+  }
+}
+
+bool SimpleStreamChecker::guaranteedNotToCloseFile(const CallEvent &Call) const{
+  // If it's not in a system header, assume it might close a file.
+  if (!Call.isInSystemHeader())
+    return false;
+
+  // Handle cases where we know a buffer's /address/ can escape.
+  if (Call.argumentsMayEscape())
+    return false;
+
+  // Note, even though fclose closes the file, we do not list it here
+  // since the checker is modeling the call.
+
+  return true;
+}
+
+// If the pointer we are tracking escaped, do not track the symbol as
+// we cannot reason about it anymore.
+ProgramStateRef
+SimpleStreamChecker::checkPointerEscape(ProgramStateRef State,
+                                        const InvalidatedSymbols &Escaped,
+                                        const CallEvent *Call,
+                                        PointerEscapeKind Kind) const {
+  // If we know that the call cannot close a file, there is nothing to do.
+  if ((Kind == PSK_DirectEscapeOnCall ||
+       Kind == PSK_IndirectEscapeOnCall) &&
+      guaranteedNotToCloseFile(*Call)) {
+    return State;
+  }
+
+  for (InvalidatedSymbols::const_iterator I = Escaped.begin(),
+                                          E = Escaped.end();
+                                          I != E; ++I) {
+    SymbolRef Sym = *I;
+
+    // The symbol escaped. Optimistically, assume that the corresponding file
+    // handle will be closed somewhere else.
+    State = State->remove<StreamMap>(Sym);
+  }
+  return State;
+}
+
+void SimpleStreamChecker::initIdentifierInfo(ASTContext &Ctx) const {
+  if (IIfopen)
+    return;
+  IIfopen = &Ctx.Idents.get("fopen");
+  IIfclose = &Ctx.Idents.get("fclose");
+}
+
+void ento::registerSimpleStreamChecker(CheckerManager &mgr) {
+  mgr.registerChecker<SimpleStreamChecker>();
+}
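For reference, a minimal C input along the following lines should trigger both diagnostics this checker defines (double close and leak); it is an illustrative sketch, not taken from the Clang test suite.

#include <stdio.h>

void double_close(const char *path) {
  FILE *F = fopen(path, "r");
  if (!F)
    return;
  fclose(F);
  fclose(F);    // "Closing a previously closed file stream"
}

void leak(const char *path) {
  FILE *F = fopen(path, "w");
  if (F)
    fputs("hello", F);
}               // "Opened file is never closed; potential resource leak"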
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
new file mode 100644
index 0000000..4fd778e
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -0,0 +1,244 @@
+//=== StackAddrEscapeChecker.cpp ----------------------------------*- C++ -*--//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the stack address leak checker, which checks whether an
+// invalid stack address is stored into a global or heap location. See CERT DCL30-C.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace ento;
+
+namespace {
+class StackAddrEscapeChecker : public Checker< check::PreStmt<ReturnStmt>,
+                                               check::EndFunction > {
+  mutable OwningPtr<BuiltinBug> BT_stackleak;
+  mutable OwningPtr<BuiltinBug> BT_returnstack;
+
+public:
+  void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
+  void checkEndFunction(CheckerContext &Ctx) const;
+private:
+  void EmitStackError(CheckerContext &C, const MemRegion *R,
+                      const Expr *RetE) const;
+  static SourceRange genName(raw_ostream &os, const MemRegion *R,
+                             ASTContext &Ctx);
+};
+}
+
+SourceRange StackAddrEscapeChecker::genName(raw_ostream &os, const MemRegion *R,
+                                            ASTContext &Ctx) {
+  // Get the base region, stripping away fields and elements.
+  R = R->getBaseRegion();
+  SourceManager &SM = Ctx.getSourceManager();
+  SourceRange range;
+  os << "Address of ";
+  
+  // Check if the region is a compound literal.
+  if (const CompoundLiteralRegion* CR = dyn_cast<CompoundLiteralRegion>(R)) { 
+    const CompoundLiteralExpr *CL = CR->getLiteralExpr();
+    os << "stack memory associated with a compound literal "
+          "declared on line "
+        << SM.getExpansionLineNumber(CL->getLocStart())
+        << " returned to caller";    
+    range = CL->getSourceRange();
+  }
+  else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(R)) {
+    const Expr *ARE = AR->getExpr();
+    SourceLocation L = ARE->getLocStart();
+    range = ARE->getSourceRange();    
+    os << "stack memory allocated by call to alloca() on line "
+       << SM.getExpansionLineNumber(L);
+  }
+  else if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
+    const BlockDecl *BD = BR->getCodeRegion()->getDecl();
+    SourceLocation L = BD->getLocStart();
+    range = BD->getSourceRange();
+    os << "stack-allocated block declared on line "
+       << SM.getExpansionLineNumber(L);
+  }
+  else if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+    os << "stack memory associated with local variable '"
+       << VR->getString() << '\'';
+    range = VR->getDecl()->getSourceRange();
+  }
+  else if (const CXXTempObjectRegion *TOR = dyn_cast<CXXTempObjectRegion>(R)) {
+    QualType Ty = TOR->getValueType().getLocalUnqualifiedType();
+    os << "stack memory associated with temporary object of type '";
+    Ty.print(os, Ctx.getPrintingPolicy());
+    os << "'";
+    range = TOR->getExpr()->getSourceRange();
+  }
+  else {
+    llvm_unreachable("Invalid region in ReturnStackAddressChecker.");
+  } 
+  
+  return range;
+}
+
+void StackAddrEscapeChecker::EmitStackError(CheckerContext &C, const MemRegion *R,
+                                          const Expr *RetE) const {
+  ExplodedNode *N = C.generateSink();
+
+  if (!N)
+    return;
+
+  if (!BT_returnstack)
+   BT_returnstack.reset(
+                 new BuiltinBug("Return of address to stack-allocated memory"));
+
+  // Generate a report for this bug.
+  SmallString<512> buf;
+  llvm::raw_svector_ostream os(buf);
+  SourceRange range = genName(os, R, C.getASTContext());
+  os << " returned to caller";
+  BugReport *report = new BugReport(*BT_returnstack, os.str(), N);
+  report->addRange(RetE->getSourceRange());
+  if (range.isValid())
+    report->addRange(range);
+
+  C.emitReport(report);
+}
+
+void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
+                                          CheckerContext &C) const {
+  
+  const Expr *RetE = RS->getRetValue();
+  if (!RetE)
+    return;
+  RetE = RetE->IgnoreParens();
+
+  const LocationContext *LCtx = C.getLocationContext();
+  SVal V = C.getState()->getSVal(RetE, LCtx);
+  const MemRegion *R = V.getAsRegion();
+
+  if (!R)
+    return;
+  
+  const StackSpaceRegion *SS =
+    dyn_cast_or_null<StackSpaceRegion>(R->getMemorySpace());
+    
+  if (!SS)
+    return;
+
+  // Return stack memory in an ancestor stack frame is fine.
+  const StackFrameContext *CurFrame = LCtx->getCurrentStackFrame();
+  const StackFrameContext *MemFrame = SS->getStackFrame();
+  if (MemFrame != CurFrame)
+    return;
+
+  // Automatic reference counting automatically copies blocks.
+  if (C.getASTContext().getLangOpts().ObjCAutoRefCount &&
+      isa<BlockDataRegion>(R))
+    return;
+
+  // Returning a record by value is fine. (In this case, the returned
+  // expression will be a copy-constructor, possibly wrapped in an
+  // ExprWithCleanups node.)
+  if (const ExprWithCleanups *Cleanup = dyn_cast<ExprWithCleanups>(RetE))
+    RetE = Cleanup->getSubExpr();
+  if (isa<CXXConstructExpr>(RetE) && RetE->getType()->isRecordType())
+    return;
+
+  EmitStackError(C, R, RetE);
+}
+
+void StackAddrEscapeChecker::checkEndFunction(CheckerContext &Ctx) const {
+  ProgramStateRef state = Ctx.getState();
+
+  // Iterate over all bindings to global variables and see if any of them
+  // refers to a memory region in the stack space.
+  class CallBack : public StoreManager::BindingsHandler {
+  private:
+    CheckerContext &Ctx;
+    const StackFrameContext *CurSFC;
+  public:
+    SmallVector<std::pair<const MemRegion*, const MemRegion*>, 10> V;
+
+    CallBack(CheckerContext &CC) :
+      Ctx(CC),
+      CurSFC(CC.getLocationContext()->getCurrentStackFrame())
+    {}
+    
+    bool HandleBinding(StoreManager &SMgr, Store store,
+                       const MemRegion *region, SVal val) {
+      
+      if (!isa<GlobalsSpaceRegion>(region->getMemorySpace()))
+        return true;
+      
+      const MemRegion *vR = val.getAsRegion();
+      if (!vR)
+        return true;
+        
+      // Under automatic reference counting (ARC), it is okay to assign a
+      // block directly to a global variable.
+      if (Ctx.getASTContext().getLangOpts().ObjCAutoRefCount &&
+          isa<BlockDataRegion>(vR))
+        return true;
+
+      if (const StackSpaceRegion *SSR = 
+          dyn_cast<StackSpaceRegion>(vR->getMemorySpace())) {
+        // If the global variable holds a location in the current stack frame,
+        // record the binding to emit a warning.
+        if (SSR->getStackFrame() == CurSFC)
+          V.push_back(std::make_pair(region, vR));
+      }
+      
+      return true;
+    }
+  };
+    
+  CallBack cb(Ctx);
+  state->getStateManager().getStoreManager().iterBindings(state->getStore(),cb);
+
+  if (cb.V.empty())
+    return;
+
+  // Generate an error node.
+  ExplodedNode *N = Ctx.addTransition(state);
+  if (!N)
+    return;
+
+  if (!BT_stackleak)
+    BT_stackleak.reset(
+      new BuiltinBug("Stack address stored into global variable",
+                     "Stack address was saved into a global variable. "
+                     "This is dangerous because the address will become "
+                     "invalid after returning from the function"));
+  
+  for (unsigned i = 0, e = cb.V.size(); i != e; ++i) {
+    // Generate a report for this bug.
+    SmallString<512> buf;
+    llvm::raw_svector_ostream os(buf);
+    SourceRange range = genName(os, cb.V[i].second, Ctx.getASTContext());
+    os << " is still referred to by the global variable '";
+    const VarRegion *VR = cast<VarRegion>(cb.V[i].first->getBaseRegion());
+    os << *VR->getDecl()
+       << "' upon returning to the caller.  This will be a dangling reference";
+    BugReport *report = new BugReport(*BT_stackleak, os.str(), N);
+    if (range.isValid())
+      report->addRange(range);
+
+    Ctx.emitReport(report);
+  }
+}
+
+void ento::registerStackAddrEscapeChecker(CheckerManager &mgr) {
+  mgr.registerChecker<StackAddrEscapeChecker>();
+}
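Likewise, a sketch of the two patterns StackAddrEscapeChecker reports, returning a stack address and saving one into a global (illustrative only, hypothetical names):

char *Global;

char *return_local(void) {
  char buf[16];
  return buf;               // address of stack memory associated with 'buf'
                            // returned to caller
}

void save_local_to_global(void) {
  int local = 0;
  Global = (char *)&local;  // stack address stored into global variable 'Global'
}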
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
new file mode 100644
index 0000000..ffdf2d5
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -0,0 +1,422 @@
+//===-- StreamChecker.cpp -----------------------------------------*- C++ -*--//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines checkers that model and check stream handling functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/ImmutableMap.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+struct StreamState {
+  enum Kind { Opened, Closed, OpenFailed, Escaped } K;
+  const Stmt *S;
+
+  StreamState(Kind k, const Stmt *s) : K(k), S(s) {}
+
+  bool isOpened() const { return K == Opened; }
+  bool isClosed() const { return K == Closed; }
+  //bool isOpenFailed() const { return K == OpenFailed; }
+  //bool isEscaped() const { return K == Escaped; }
+
+  bool operator==(const StreamState &X) const {
+    return K == X.K && S == X.S;
+  }
+
+  static StreamState getOpened(const Stmt *s) { return StreamState(Opened, s); }
+  static StreamState getClosed(const Stmt *s) { return StreamState(Closed, s); }
+  static StreamState getOpenFailed(const Stmt *s) { 
+    return StreamState(OpenFailed, s); 
+  }
+  static StreamState getEscaped(const Stmt *s) {
+    return StreamState(Escaped, s);
+  }
+
+  void Profile(llvm::FoldingSetNodeID &ID) const {
+    ID.AddInteger(K);
+    ID.AddPointer(S);
+  }
+};
+
+class StreamChecker : public Checker<eval::Call,
+                                     check::DeadSymbols > {
+  mutable IdentifierInfo *II_fopen, *II_tmpfile, *II_fclose, *II_fread,
+                 *II_fwrite, 
+                 *II_fseek, *II_ftell, *II_rewind, *II_fgetpos, *II_fsetpos,  
+                 *II_clearerr, *II_feof, *II_ferror, *II_fileno;
+  mutable OwningPtr<BuiltinBug> BT_nullfp, BT_illegalwhence,
+                                      BT_doubleclose, BT_ResourceLeak;
+
+public:
+  StreamChecker() 
+    : II_fopen(0), II_tmpfile(0) ,II_fclose(0), II_fread(0), II_fwrite(0), 
+      II_fseek(0), II_ftell(0), II_rewind(0), II_fgetpos(0), II_fsetpos(0), 
+      II_clearerr(0), II_feof(0), II_ferror(0), II_fileno(0) {}
+
+  bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+  void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+
+private:
+  void Fopen(CheckerContext &C, const CallExpr *CE) const;
+  void Tmpfile(CheckerContext &C, const CallExpr *CE) const;
+  void Fclose(CheckerContext &C, const CallExpr *CE) const;
+  void Fread(CheckerContext &C, const CallExpr *CE) const;
+  void Fwrite(CheckerContext &C, const CallExpr *CE) const;
+  void Fseek(CheckerContext &C, const CallExpr *CE) const;
+  void Ftell(CheckerContext &C, const CallExpr *CE) const;
+  void Rewind(CheckerContext &C, const CallExpr *CE) const;
+  void Fgetpos(CheckerContext &C, const CallExpr *CE) const;
+  void Fsetpos(CheckerContext &C, const CallExpr *CE) const;
+  void Clearerr(CheckerContext &C, const CallExpr *CE) const;
+  void Feof(CheckerContext &C, const CallExpr *CE) const;
+  void Ferror(CheckerContext &C, const CallExpr *CE) const;
+  void Fileno(CheckerContext &C, const CallExpr *CE) const;
+
+  void OpenFileAux(CheckerContext &C, const CallExpr *CE) const;
+  
+  ProgramStateRef CheckNullStream(SVal SV, ProgramStateRef state, 
+                                 CheckerContext &C) const;
+  ProgramStateRef CheckDoubleClose(const CallExpr *CE, ProgramStateRef state, 
+                                 CheckerContext &C) const;
+};
+
+} // end anonymous namespace
+
+REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
+
+
+bool StreamChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+  const FunctionDecl *FD = C.getCalleeDecl(CE);
+  if (!FD || FD->getKind() != Decl::Function)
+    return false;
+
+  ASTContext &Ctx = C.getASTContext();
+  if (!II_fopen)
+    II_fopen = &Ctx.Idents.get("fopen");
+  if (!II_tmpfile)
+    II_tmpfile = &Ctx.Idents.get("tmpfile");
+  if (!II_fclose)
+    II_fclose = &Ctx.Idents.get("fclose");
+  if (!II_fread)
+    II_fread = &Ctx.Idents.get("fread");
+  if (!II_fwrite)
+    II_fwrite = &Ctx.Idents.get("fwrite");
+  if (!II_fseek)
+    II_fseek = &Ctx.Idents.get("fseek");
+  if (!II_ftell)
+    II_ftell = &Ctx.Idents.get("ftell");
+  if (!II_rewind)
+    II_rewind = &Ctx.Idents.get("rewind");
+  if (!II_fgetpos)
+    II_fgetpos = &Ctx.Idents.get("fgetpos");
+  if (!II_fsetpos)
+    II_fsetpos = &Ctx.Idents.get("fsetpos");
+  if (!II_clearerr)
+    II_clearerr = &Ctx.Idents.get("clearerr");
+  if (!II_feof)
+    II_feof = &Ctx.Idents.get("feof");
+  if (!II_ferror)
+    II_ferror = &Ctx.Idents.get("ferror");
+  if (!II_fileno)
+    II_fileno = &Ctx.Idents.get("fileno");
+
+  if (FD->getIdentifier() == II_fopen) {
+    Fopen(C, CE);
+    return true;
+  }
+  if (FD->getIdentifier() == II_tmpfile) {
+    Tmpfile(C, CE);
+    return true;
+  }
+  if (FD->getIdentifier() == II_fclose) {
+    Fclose(C, CE);
+    return true;
+  }
+  if (FD->getIdentifier() == II_fread) {
+    Fread(C, CE);
+    return true;
+  }
+  if (FD->getIdentifier() == II_fwrite) {
+    Fwrite(C, CE);
+    return true;
+  }
+  if (FD->getIdentifier() == II_fseek) {
+    Fseek(C, CE);
+    return true;
+  }
+  if (FD->getIdentifier() == II_ftell) {
+    Ftell(C, CE);
+    return true;
+  }
+  if (FD->getIdentifier() == II_rewind) {
+    Rewind(C, CE);
+    return true;
+  }
+  if (FD->getIdentifier() == II_fgetpos) {
+    Fgetpos(C, CE);
+    return true;
+  }
+  if (FD->getIdentifier() == II_fsetpos) {
+    Fsetpos(C, CE);
+    return true;
+  }
+  if (FD->getIdentifier() == II_clearerr) {
+    Clearerr(C, CE);
+    return true;
+  }
+  if (FD->getIdentifier() == II_feof) {
+    Feof(C, CE);
+    return true;
+  }
+  if (FD->getIdentifier() == II_ferror) {
+    Ferror(C, CE);
+    return true;
+  }
+  if (FD->getIdentifier() == II_fileno) {
+    Fileno(C, CE);
+    return true;
+  }
+
+  return false;
+}
+
+void StreamChecker::Fopen(CheckerContext &C, const CallExpr *CE) const {
+  OpenFileAux(C, CE);
+}
+
+void StreamChecker::Tmpfile(CheckerContext &C, const CallExpr *CE) const {
+  OpenFileAux(C, CE);
+}
+
+void StreamChecker::OpenFileAux(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = C.getState();
+  SValBuilder &svalBuilder = C.getSValBuilder();
+  const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
+  DefinedSVal RetVal = svalBuilder.conjureSymbolVal(0, CE, LCtx, C.blockCount())
+      .castAs<DefinedSVal>();
+  state = state->BindExpr(CE, C.getLocationContext(), RetVal);
+  
+  ConstraintManager &CM = C.getConstraintManager();
+  // Bifurcate the state into two: one with a valid FILE* pointer, the other
+  // with a NULL.
+  ProgramStateRef stateNotNull, stateNull;
+  llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, RetVal);
+  
+  if (SymbolRef Sym = RetVal.getAsSymbol()) {
+    // If RetVal is non-null the stream is Opened; on the null branch, fopen failed.
+    stateNotNull =
+      stateNotNull->set<StreamMap>(Sym,StreamState::getOpened(CE));
+    stateNull =
+      stateNull->set<StreamMap>(Sym, StreamState::getOpenFailed(CE));
+
+    C.addTransition(stateNotNull);
+    C.addTransition(stateNull);
+  }
+}
+
+void StreamChecker::Fclose(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = CheckDoubleClose(CE, C.getState(), C);
+  if (state)
+    C.addTransition(state);
+}
+
+void StreamChecker::Fread(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = C.getState();
+  if (!CheckNullStream(state->getSVal(CE->getArg(3), C.getLocationContext()),
+                       state, C))
+    return;
+}
+
+void StreamChecker::Fwrite(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = C.getState();
+  if (!CheckNullStream(state->getSVal(CE->getArg(3), C.getLocationContext()),
+                       state, C))
+    return;
+}
+
+void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = C.getState();
+  if (!(state = CheckNullStream(state->getSVal(CE->getArg(0),
+                                               C.getLocationContext()), state, C)))
+    return;
+  // Check the legality of the 'whence' argument of 'fseek'.
+  SVal Whence = state->getSVal(CE->getArg(2), C.getLocationContext());
+  Optional<nonloc::ConcreteInt> CI = Whence.getAs<nonloc::ConcreteInt>();
+
+  if (!CI)
+    return;
+
+  int64_t x = CI->getValue().getSExtValue();
+  if (x >= 0 && x <= 2)
+    return;
+
+  if (ExplodedNode *N = C.addTransition(state)) {
+    if (!BT_illegalwhence)
+      BT_illegalwhence.reset(new BuiltinBug("Illegal whence argument",
+                                 "The whence argument to fseek() should be "
+                                 "SEEK_SET, SEEK_END, or SEEK_CUR."));
+    BugReport *R = new BugReport(*BT_illegalwhence,
+                                 BT_illegalwhence->getDescription(), N);
+    C.emitReport(R);
+  }
+}
+
+void StreamChecker::Ftell(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = C.getState();
+  if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+                       state, C))
+    return;
+}
+
+void StreamChecker::Rewind(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = C.getState();
+  if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+                       state, C))
+    return;
+}
+
+void StreamChecker::Fgetpos(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = C.getState();
+  if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+                       state, C))
+    return;
+}
+
+void StreamChecker::Fsetpos(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = C.getState();
+  if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+                       state, C))
+    return;
+}
+
+void StreamChecker::Clearerr(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = C.getState();
+  if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+                       state, C))
+    return;
+}
+
+void StreamChecker::Feof(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = C.getState();
+  if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+                       state, C))
+    return;
+}
+
+void StreamChecker::Ferror(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = C.getState();
+  if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+                       state, C))
+    return;
+}
+
+void StreamChecker::Fileno(CheckerContext &C, const CallExpr *CE) const {
+  ProgramStateRef state = C.getState();
+  if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+                       state, C))
+    return;
+}
+
+ProgramStateRef StreamChecker::CheckNullStream(SVal SV, ProgramStateRef state,
+                                    CheckerContext &C) const {
+  Optional<DefinedSVal> DV = SV.getAs<DefinedSVal>();
+  if (!DV)
+    return 0;
+
+  ConstraintManager &CM = C.getConstraintManager();
+  ProgramStateRef stateNotNull, stateNull;
+  llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
+
+  if (!stateNotNull && stateNull) {
+    if (ExplodedNode *N = C.generateSink(stateNull)) {
+      if (!BT_nullfp)
+        BT_nullfp.reset(new BuiltinBug("NULL stream pointer",
+                                     "Stream pointer might be NULL."));
+      BugReport *R =new BugReport(*BT_nullfp, BT_nullfp->getDescription(), N);
+      C.emitReport(R);
+    }
+    return 0;
+  }
+  return stateNotNull;
+}
+
+ProgramStateRef StreamChecker::CheckDoubleClose(const CallExpr *CE,
+                                               ProgramStateRef state,
+                                               CheckerContext &C) const {
+  SymbolRef Sym =
+    state->getSVal(CE->getArg(0), C.getLocationContext()).getAsSymbol();
+  if (!Sym)
+    return state;
+  
+  const StreamState *SS = state->get<StreamMap>(Sym);
+
+  // If the file stream is not tracked, return.
+  if (!SS)
+    return state;
+  
+  // Check: closing an already closed file descriptor causes undefined
+  // behavior, per the fclose man page.
+  if (SS->isClosed()) {
+    ExplodedNode *N = C.generateSink();
+    if (N) {
+      if (!BT_doubleclose)
+        BT_doubleclose.reset(new BuiltinBug("Double fclose",
+                                 "Closing a file descriptor that is already"
+                                 " closed causes undefined behavior."));
+      BugReport *R = new BugReport(*BT_doubleclose,
+                                   BT_doubleclose->getDescription(), N);
+      C.emitReport(R);
+    }
+    return NULL;
+  }
+  
+  // Close the File Descriptor.
+  return state->set<StreamMap>(Sym, StreamState::getClosed(CE));
+}
+
+void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+                                     CheckerContext &C) const {
+  // TODO: Clean up the state.
+  for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+         E = SymReaper.dead_end(); I != E; ++I) {
+    SymbolRef Sym = *I;
+    ProgramStateRef state = C.getState();
+    const StreamState *SS = state->get<StreamMap>(Sym);
+    if (!SS)
+      continue;
+
+    if (SS->isOpened()) {
+      ExplodedNode *N = C.generateSink();
+      if (N) {
+        if (!BT_ResourceLeak)
+          BT_ResourceLeak.reset(new BuiltinBug("Resource Leak",
+                         "Opened file is never closed; potential resource leak."));
+        BugReport *R = new BugReport(*BT_ResourceLeak, 
+                                     BT_ResourceLeak->getDescription(), N);
+        C.emitReport(R);
+      }
+    }
+  }
+}
+
+void ento::registerStreamChecker(CheckerManager &mgr) {
+  mgr.registerChecker<StreamChecker>();
+}
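As an illustration of two of the checks modeled above, a hypothetical input exercising the illegal-whence and NULL-stream diagnostics:

#include <stdio.h>

void bad_whence(FILE *F) {
  if (!F)
    return;
  fseek(F, 0L, 4);          // whence should be SEEK_SET, SEEK_END, or SEEK_CUR
}

void maybe_null(void) {
  FILE *F = fopen("data.txt", "r");
  ftell(F);                 // stream pointer might be NULL (fopen can fail)
  fclose(F);
}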
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
new file mode 100644
index 0000000..264f7f9
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
@@ -0,0 +1,62 @@
+//== TaintTesterChecker.cpp ----------------------------------- -*- C++ -*--=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker can be used for testing how tainted data is propagated.
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class TaintTesterChecker : public Checker< check::PostStmt<Expr> > {
+
+  mutable OwningPtr<BugType> BT;
+  void initBugType() const;
+
+  /// Given a pointer argument, get the symbol of the value it contains
+  /// (points to).
+  SymbolRef getPointedToSymbol(CheckerContext &C,
+                               const Expr* Arg,
+                               bool IssueWarning = true) const;
+
+public:
+  void checkPostStmt(const Expr *E, CheckerContext &C) const;
+};
+}
+
+inline void TaintTesterChecker::initBugType() const {
+  if (!BT)
+    BT.reset(new BugType("Tainted data", "General"));
+}
+
+void TaintTesterChecker::checkPostStmt(const Expr *E,
+                                       CheckerContext &C) const {
+  ProgramStateRef State = C.getState();
+  if (!State)
+    return;
+
+  if (State->isTainted(E, C.getLocationContext())) {
+    if (ExplodedNode *N = C.addTransition()) {
+      initBugType();
+      BugReport *report = new BugReport(*BT, "tainted",N);
+      report->addRange(E->getSourceRange());
+      C.emitReport(report);
+    }
+  }
+}
+
+void ento::registerTaintTesterChecker(CheckerManager &mgr) {
+  mgr.registerChecker<TaintTesterChecker>();
+}
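TaintTesterChecker only annotates expressions that some other model has already marked as tainted; assuming a taint source such as scanf is modeled (for example by GenericTaintChecker), an input like this sketch would have its uses of 'n' reported as "tainted":

#include <stdio.h>

void taint_flow(void) {
  int n;
  scanf("%d", &n);          // 'n' becomes tainted via the taint-source model
  int m = n + 1;            // expressions reading 'n' are flagged as "tainted"
  (void)m;
}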
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
new file mode 100644
index 0000000..57c9ed4
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/TraversalChecker.cpp
@@ -0,0 +1,107 @@
+//== TraversalChecker.cpp -------------------------------------- -*- C++ -*--=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These checkers print various aspects of the ExprEngine's traversal of the CFG
+// as it builds the ExplodedGraph.
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class TraversalDumper : public Checker< check::BranchCondition,
+                                        check::EndFunction > {
+public:
+  void checkBranchCondition(const Stmt *Condition, CheckerContext &C) const;
+  void checkEndFunction(CheckerContext &C) const;
+};
+}
+
+void TraversalDumper::checkBranchCondition(const Stmt *Condition,
+                                           CheckerContext &C) const {
+  // Special-case Objective-C's for-in loop, which uses the entire loop as its
+  // condition. We just print the collection expression.
+  const Stmt *Parent = dyn_cast<ObjCForCollectionStmt>(Condition);
+  if (!Parent) {
+    const ParentMap &Parents = C.getLocationContext()->getParentMap();
+    Parent = Parents.getParent(Condition);
+  }
+
+  // It is mildly evil to print directly to llvm::outs() rather than emitting
+  // warnings, but this ensures things do not get filtered out by the rest of
+  // the static analyzer machinery.
+  SourceLocation Loc = Parent->getLocStart();
+  llvm::outs() << C.getSourceManager().getSpellingLineNumber(Loc) << " "
+               << Parent->getStmtClassName() << "\n";
+}
+
+void TraversalDumper::checkEndFunction(CheckerContext &C) const {
+  llvm::outs() << "--END FUNCTION--\n";
+}
+
+void ento::registerTraversalDumper(CheckerManager &mgr) {
+  mgr.registerChecker<TraversalDumper>();
+}
+
+//------------------------------------------------------------------------------
+
+namespace {
+class CallDumper : public Checker< check::PreCall,
+                                   check::PostCall > {
+public:
+  void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+  void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+};
+}
+
+void CallDumper::checkPreCall(const CallEvent &Call, CheckerContext &C) const {
+  unsigned Indentation = 0;
+  for (const LocationContext *LC = C.getLocationContext()->getParent();
+       LC != 0; LC = LC->getParent())
+    ++Indentation;
+
+  // It is mildly evil to print directly to llvm::outs() rather than emitting
+  // warnings, but this ensures things do not get filtered out by the rest of
+  // the static analyzer machinery.
+  llvm::outs().indent(Indentation);
+  Call.dump(llvm::outs());
+}
+
+void CallDumper::checkPostCall(const CallEvent &Call, CheckerContext &C) const {
+  const Expr *CallE = Call.getOriginExpr();
+  if (!CallE)
+    return;
+
+  unsigned Indentation = 0;
+  for (const LocationContext *LC = C.getLocationContext()->getParent();
+       LC != 0; LC = LC->getParent())
+    ++Indentation;
+
+  // It is mildly evil to print directly to llvm::outs() rather than emitting
+  // warnings, but this ensures things do not get filtered out by the rest of
+  // the static analyzer machinery.
+  llvm::outs().indent(Indentation);
+  if (Call.getResultType()->isVoidType())
+    llvm::outs() << "Returning void\n";
+  else
+    llvm::outs() << "Returning " << C.getSVal(CallE) << "\n";
+}
+
+void ento::registerCallDumper(CheckerManager &mgr) {
+  mgr.registerChecker<CallDumper>();
+}
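Both debug checkers above print straight to stdout rather than emitting warnings; for a function like the sketch below, the traversal dumper would print one "<line> <statement class>" entry per branch condition plus the end-of-function marker (the output shown in comments is illustrative):

void branches(int x) {      // with the traversal dumper enabled:
  if (x > 0)                //   2 IfStmt
    x--;
  while (x)                 //   4 WhileStmt
    x--;
}                           //   --END FUNCTION--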
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
new file mode 100644
index 0000000..8235e68
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -0,0 +1,112 @@
+//=== UndefBranchChecker.cpp -----------------------------------*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines UndefBranchChecker, which checks for undefined branch
+// conditions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class UndefBranchChecker : public Checker<check::BranchCondition> {
+  mutable OwningPtr<BuiltinBug> BT;
+
+  struct FindUndefExpr {
+    ProgramStateRef St;
+    const LocationContext *LCtx;
+
+    FindUndefExpr(ProgramStateRef S, const LocationContext *L) 
+      : St(S), LCtx(L) {}
+
+    const Expr *FindExpr(const Expr *Ex) {
+      if (!MatchesCriteria(Ex))
+        return 0;
+
+      for (Stmt::const_child_iterator I = Ex->child_begin(), 
+                                      E = Ex->child_end();I!=E;++I)
+        if (const Expr *ExI = dyn_cast_or_null<Expr>(*I)) {
+          const Expr *E2 = FindExpr(ExI);
+          if (E2) return E2;
+        }
+
+      return Ex;
+    }
+
+    bool MatchesCriteria(const Expr *Ex) { 
+      return St->getSVal(Ex, LCtx).isUndef();
+    }
+  };
+
+public:
+  void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const;
+};
+
+}
+
+void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
+                                              CheckerContext &Ctx) const {
+  SVal X = Ctx.getState()->getSVal(Condition, Ctx.getLocationContext());
+  if (X.isUndef()) {
+    // Generate a sink node, which implicitly marks both outgoing branches as
+    // infeasible.
+    ExplodedNode *N = Ctx.generateSink();
+    if (N) {
+      if (!BT)
+        BT.reset(
+               new BuiltinBug("Branch condition evaluates to a garbage value"));
+
+      // What's going on here: we want to highlight the subexpression of the
+      // condition that is the most likely source of the "uninitialized
+      // branch condition."  We do a recursive walk of the condition's
+      // subexpressions and roughly look for the most nested subexpression
+      // that binds to Undefined.  We then highlight that expression's range.
+
+      // Get the predecessor node and check if it is a PostStmt with the Stmt
+      // being the terminator condition.  We want to inspect the state
+      // of that node instead because it will contain more information about
+      // the subexpressions.
+
+      // Note: any predecessor will do.  They should have identical state,
+      // since all the BlockEdge did was act as an error sink because the
+      // value had to already be undefined.
+      assert (!N->pred_empty());
+      const Expr *Ex = cast<Expr>(Condition);
+      ExplodedNode *PrevN = *N->pred_begin();
+      ProgramPoint P = PrevN->getLocation();
+      ProgramStateRef St = N->getState();
+
+      if (Optional<PostStmt> PS = P.getAs<PostStmt>())
+        if (PS->getStmt() == Ex)
+          St = PrevN->getState();
+
+      FindUndefExpr FindIt(St, Ctx.getLocationContext());
+      Ex = FindIt.FindExpr(Ex);
+
+      // Emit the bug report.
+      BugReport *R = new BugReport(*BT, BT->getDescription(), N);
+      bugreporter::trackNullOrUndefValue(N, Ex, *R);
+      R->addRange(Ex->getSourceRange());
+
+      Ctx.emitReport(R);
+    }
+  }
+}
+
+void ento::registerUndefBranchChecker(CheckerManager &mgr) {
+  mgr.registerChecker<UndefBranchChecker>();
+}
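A minimal example of what this checker flags, branching directly on an uninitialized local (illustrative only):

int branch_on_garbage(void) {
  int x;                    // never initialized
  if (x)                    // branch condition evaluates to a garbage value
    return 1;
  return 0;
}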
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
new file mode 100644
index 0000000..93812f7
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -0,0 +1,106 @@
+// UndefCapturedBlockVarChecker.cpp - Uninitialized captured vars -*- C++ -*-=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker detects blocks that capture uninitialized values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Attr.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UndefCapturedBlockVarChecker
+  : public Checker< check::PostStmt<BlockExpr> > {
+ mutable OwningPtr<BugType> BT;
+
+public:
+  void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+static const DeclRefExpr *FindBlockDeclRefExpr(const Stmt *S,
+                                               const VarDecl *VD) {
+  if (const DeclRefExpr *BR = dyn_cast<DeclRefExpr>(S))
+    if (BR->getDecl() == VD)
+      return BR;
+
+  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+       I!=E; ++I)
+    if (const Stmt *child = *I) {
+      const DeclRefExpr *BR = FindBlockDeclRefExpr(child, VD);
+      if (BR)
+        return BR;
+    }
+
+  return NULL;
+}
+
+void
+UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
+                                            CheckerContext &C) const {
+  if (!BE->getBlockDecl()->hasCaptures())
+    return;
+
+  ProgramStateRef state = C.getState();
+  const BlockDataRegion *R =
+    cast<BlockDataRegion>(state->getSVal(BE,
+                                         C.getLocationContext()).getAsRegion());
+
+  BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
+                                            E = R->referenced_vars_end();
+
+  for (; I != E; ++I) {
+    // This VarRegion is the region associated with the block; we need
+    // the one associated with the encompassing context.
+    const VarRegion *VR = I.getCapturedRegion();
+    const VarDecl *VD = VR->getDecl();
+
+    if (VD->getAttr<BlocksAttr>() || !VD->hasLocalStorage())
+      continue;
+
+    // Get the VarRegion associated with VD in the local stack frame.
+    if (Optional<UndefinedVal> V =
+          state->getSVal(I.getOriginalRegion()).getAs<UndefinedVal>()) {
+      if (ExplodedNode *N = C.generateSink()) {
+        if (!BT)
+          BT.reset(new BuiltinBug("uninitialized variable captured by block"));
+
+        // Generate a bug report.
+        SmallString<128> buf;
+        llvm::raw_svector_ostream os(buf);
+
+        os << "Variable '" << VD->getName() 
+           << "' is uninitialized when captured by block";
+
+        BugReport *R = new BugReport(*BT, os.str(), N);
+        if (const Expr *Ex = FindBlockDeclRefExpr(BE->getBody(), VD))
+          R->addRange(Ex->getSourceRange());
+        R->addVisitor(new FindLastStoreBRVisitor(*V, VR,
+                                             /*EnableNullFPSuppression*/false));
+        R->disablePathPruning();
+        // need location of block
+        C.emitReport(R);
+      }
+    }
+  }
+}
+
+void ento::registerUndefCapturedBlockVarChecker(CheckerManager &mgr) {
+  mgr.registerChecker<UndefCapturedBlockVarChecker>();
+}
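A sketch of the pattern this checker detects, a block capturing an uninitialized local by value (requires -fblocks; illustrative only):

void capture_garbage(void) {
  int x;                            // never initialized
  int (^b)(void) = ^{ return x; };  // 'x' is uninitialized when captured by block
  (void)b;
}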
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
new file mode 100644
index 0000000..6733563
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -0,0 +1,91 @@
+//=== UndefResultChecker.cpp ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UndefResultChecker, a builtin check in ExprEngine that 
+// performs checks for undefined results of non-assignment binary operators.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UndefResultChecker 
+  : public Checker< check::PostStmt<BinaryOperator> > {
+
+  mutable OwningPtr<BugType> BT;
+  
+public:
+  void checkPostStmt(const BinaryOperator *B, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
+                                       CheckerContext &C) const {
+  ProgramStateRef state = C.getState();
+  const LocationContext *LCtx = C.getLocationContext();
+  if (state->getSVal(B, LCtx).isUndef()) {
+    // Generate an error node.
+    ExplodedNode *N = C.generateSink();
+    if (!N)
+      return;
+    
+    if (!BT)
+      BT.reset(new BuiltinBug("Result of operation is garbage or undefined"));
+
+    SmallString<256> sbuf;
+    llvm::raw_svector_ostream OS(sbuf);
+    const Expr *Ex = NULL;
+    bool isLeft = true;
+    
+    if (state->getSVal(B->getLHS(), LCtx).isUndef()) {
+      Ex = B->getLHS()->IgnoreParenCasts();
+      isLeft = true;
+    }
+    else if (state->getSVal(B->getRHS(), LCtx).isUndef()) {
+      Ex = B->getRHS()->IgnoreParenCasts();
+      isLeft = false;
+    }
+    
+    if (Ex) {
+      OS << "The " << (isLeft ? "left" : "right")
+         << " operand of '"
+         << BinaryOperator::getOpcodeStr(B->getOpcode())
+         << "' is a garbage value";
+    }          
+    else {
+      // Neither operand was undefined, but the result is undefined.
+      OS << "The result of the '"
+         << BinaryOperator::getOpcodeStr(B->getOpcode())
+         << "' expression is undefined";
+    }
+    BugReport *report = new BugReport(*BT, OS.str(), N);
+    if (Ex) {
+      report->addRange(Ex->getSourceRange());
+      bugreporter::trackNullOrUndefValue(N, Ex, *report);
+    }
+    else
+      bugreporter::trackNullOrUndefValue(N, B, *report);
+    
+    C.emitReport(report);
+  }
+}
+
+void ento::registerUndefResultChecker(CheckerManager &mgr) {
+  mgr.registerChecker<UndefResultChecker>();
+}
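For illustration, a binary operation with one uninitialized operand that this checker should report:

int garbage_operand(void) {
  int x;                    // never initialized
  return x + 1;             // the left operand of '+' is a garbage value
}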
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
new file mode 100644
index 0000000..176ee48
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -0,0 +1,63 @@
+//===--- UndefinedArraySubscriptChecker.h ----------------------*- C++ -*--===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UndefinedArraySubscriptChecker, a builtin check in ExprEngine
+// that performs checks for undefined array subscripts.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UndefinedArraySubscriptChecker
+  : public Checker< check::PreStmt<ArraySubscriptExpr> > {
+  mutable OwningPtr<BugType> BT;
+
+public:
+  void checkPreStmt(const ArraySubscriptExpr *A, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void 
+UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
+                                             CheckerContext &C) const {
+  const Expr *Index = A->getIdx();
+  if (!C.getSVal(Index).isUndef())
+    return;
+
+  // Sema generates anonymous array variables for copying array struct fields.
+  // Don't warn if we're in an implicitly-generated constructor.
+  const Decl *D = C.getLocationContext()->getDecl();
+  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(D))
+    if (Ctor->isImplicitlyDefined())
+      return;
+
+  ExplodedNode *N = C.generateSink();
+  if (!N)
+    return;
+  if (!BT)
+    BT.reset(new BuiltinBug("Array subscript is undefined"));
+
+  // Generate a report for this bug.
+  BugReport *R = new BugReport(*BT, BT->getName(), N);
+  R->addRange(A->getIdx()->getSourceRange());
+  bugreporter::trackNullOrUndefValue(N, A->getIdx(), *R);
+  C.emitReport(R);
+}
+
+void ento::registerUndefinedArraySubscriptChecker(CheckerManager &mgr) {
+  mgr.registerChecker<UndefinedArraySubscriptChecker>();
+}
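A minimal input for the subscript check, indexing with an uninitialized value (illustrative only):

int garbage_index(int *p) {
  int i;                    // never initialized
  return p[i];              // array subscript is undefined
}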
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
new file mode 100644
index 0000000..e04f49c
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -0,0 +1,88 @@
+//===--- UndefinedAssignmentChecker.h ---------------------------*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UndefinedAssignmentChecker, a builtin check in ExprEngine that
+// checks for assigning undefined values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UndefinedAssignmentChecker
+  : public Checker<check::Bind> {
+  mutable OwningPtr<BugType> BT;
+
+public:
+  void checkBind(SVal location, SVal val, const Stmt *S,
+                 CheckerContext &C) const;
+};
+}
+
+void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
+                                           const Stmt *StoreE,
+                                           CheckerContext &C) const {
+  if (!val.isUndef())
+    return;
+
+  ExplodedNode *N = C.generateSink();
+
+  if (!N)
+    return;
+
+  const char *str = "Assigned value is garbage or undefined";
+
+  if (!BT)
+    BT.reset(new BuiltinBug(str));
+
+  // Generate a report for this bug.
+  const Expr *ex = 0;
+
+  while (StoreE) {
+    if (const BinaryOperator *B = dyn_cast<BinaryOperator>(StoreE)) {
+      if (B->isCompoundAssignmentOp()) {
+        ProgramStateRef state = C.getState();
+        if (state->getSVal(B->getLHS(), C.getLocationContext()).isUndef()) {
+          str = "The left expression of the compound assignment is an "
+                "uninitialized value. The computed value will also be garbage";
+          ex = B->getLHS();
+          break;
+        }
+      }
+
+      ex = B->getRHS();
+      break;
+    }
+
+    if (const DeclStmt *DS = dyn_cast<DeclStmt>(StoreE)) {
+      const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
+      ex = VD->getInit();
+    }
+
+    break;
+  }
+
+  BugReport *R = new BugReport(*BT, str, N);
+  if (ex) {
+    R->addRange(ex->getSourceRange());
+    bugreporter::trackNullOrUndefValue(N, ex, *R);
+  }
+  C.emitReport(R);
+}
+
+void ento::registerUndefinedAssignmentChecker(CheckerManager &mgr) {
+  mgr.registerChecker<UndefinedAssignmentChecker>();
+}
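And the simplest case the assignment checker reports, binding an uninitialized value (illustrative only):

void assign_garbage(void) {
  int x;                    // never initialized
  int y = x;                // assigned value is garbage or undefined
  (void)y;
}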
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
new file mode 100644
index 0000000..4ea07e2
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -0,0 +1,363 @@
+//= UnixAPIChecker.h - Checks preconditions for various Unix APIs --*- C++ -*-//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UnixAPIChecker, which is an assortment of checks on calls
+// to various widely used UNIX/POSIX functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+#include <fcntl.h>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UnixAPIChecker : public Checker< check::PreStmt<CallExpr> > {
+  mutable OwningPtr<BugType> BT_open, BT_pthreadOnce, BT_mallocZero;
+  mutable Optional<uint64_t> Val_O_CREAT;
+
+public:
+  void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+  void CheckOpen(CheckerContext &C, const CallExpr *CE) const;
+  void CheckPthreadOnce(CheckerContext &C, const CallExpr *CE) const;
+  void CheckCallocZero(CheckerContext &C, const CallExpr *CE) const;
+  void CheckMallocZero(CheckerContext &C, const CallExpr *CE) const;
+  void CheckReallocZero(CheckerContext &C, const CallExpr *CE) const;
+  void CheckReallocfZero(CheckerContext &C, const CallExpr *CE) const;
+  void CheckAllocaZero(CheckerContext &C, const CallExpr *CE) const;
+  void CheckVallocZero(CheckerContext &C, const CallExpr *CE) const;
+
+  typedef void (UnixAPIChecker::*SubChecker)(CheckerContext &,
+                                             const CallExpr *) const;
+private:
+  bool ReportZeroByteAllocation(CheckerContext &C,
+                                ProgramStateRef falseState,
+                                const Expr *arg,
+                                const char *fn_name) const;
+  void BasicAllocationCheck(CheckerContext &C,
+                            const CallExpr *CE,
+                            const unsigned numArgs,
+                            const unsigned sizeArg,
+                            const char *fn) const;
+};
+} //end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+static inline void LazyInitialize(OwningPtr<BugType> &BT,
+                                  const char *name) {
+  if (BT)
+    return;
+  BT.reset(new BugType(name, categories::UnixAPI));
+}
+
+//===----------------------------------------------------------------------===//
+// "open" (man 2 open)
+//===----------------------------------------------------------------------===//
+
+void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
+  // The definition of O_CREAT is platform specific.  We need a better way
+  // of querying this information from the checking environment.
+  if (!Val_O_CREAT.hasValue()) {
+    if (C.getASTContext().getTargetInfo().getTriple().getVendor() 
+                                                      == llvm::Triple::Apple)
+      Val_O_CREAT = 0x0200;
+    else {
+      // FIXME: We need a more general way of getting the O_CREAT value.
+      // We could possibly grovel through the preprocessor state, but
+      // that would require passing the Preprocessor object to the ExprEngine.
+      return;
+    }
+  }
+
+  // Look at the 'oflags' argument for the O_CREAT flag.
+  ProgramStateRef state = C.getState();
+
+  if (CE->getNumArgs() < 2) {
+    // The frontend should issue a warning for this case, so this is a sanity
+    // check.
+    return;
+  }
+
+  // Now check if oflags has O_CREAT set.
+  const Expr *oflagsEx = CE->getArg(1);
+  const SVal V = state->getSVal(oflagsEx, C.getLocationContext());
+  if (!V.getAs<NonLoc>()) {
+    // The case where 'V' can be a location can only be due to a bad header,
+    // so in this case bail out.
+    return;
+  }
+  NonLoc oflags = V.castAs<NonLoc>();
+  NonLoc ocreateFlag = C.getSValBuilder()
+      .makeIntVal(Val_O_CREAT.getValue(), oflagsEx->getType()).castAs<NonLoc>();
+  SVal maskedFlagsUC = C.getSValBuilder().evalBinOpNN(state, BO_And,
+                                                      oflags, ocreateFlag,
+                                                      oflagsEx->getType());
+  if (maskedFlagsUC.isUnknownOrUndef())
+    return;
+  DefinedSVal maskedFlags = maskedFlagsUC.castAs<DefinedSVal>();
+
+  // Check if maskedFlags is non-zero.
+  ProgramStateRef trueState, falseState;
+  llvm::tie(trueState, falseState) = state->assume(maskedFlags);
+
+  // Only emit an error if the value of 'maskedFlags' is properly
+  // constrained.
+  if (!(trueState && !falseState))
+    return;
+
+  if (CE->getNumArgs() < 3) {
+    ExplodedNode *N = C.generateSink(trueState);
+    if (!N)
+      return;
+
+    LazyInitialize(BT_open, "Improper use of 'open'");
+
+    BugReport *report =
+      new BugReport(*BT_open,
+                            "Call to 'open' requires a third argument when "
+                            "the 'O_CREAT' flag is set", N);
+    report->addRange(oflagsEx->getSourceRange());
+    C.emitReport(report);
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// pthread_once
+//===----------------------------------------------------------------------===//
+
+void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C,
+                                      const CallExpr *CE) const {
+
+  // This is similar to 'CheckDispatchOnce' in the MacOSXAPIChecker.
+  // They can possibly be refactored.
+
+  if (CE->getNumArgs() < 1)
+    return;
+
+  // Check if the first argument is stack allocated.  If so, issue a warning
+  // because that's likely to be bad news.
+  ProgramStateRef state = C.getState();
+  const MemRegion *R =
+    state->getSVal(CE->getArg(0), C.getLocationContext()).getAsRegion();
+  if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
+    return;
+
+  ExplodedNode *N = C.generateSink(state);
+  if (!N)
+    return;
+
+  SmallString<256> S;
+  llvm::raw_svector_ostream os(S);
+  os << "Call to 'pthread_once' uses";
+  if (const VarRegion *VR = dyn_cast<VarRegion>(R))
+    os << " the local variable '" << VR->getDecl()->getName() << '\'';
+  else
+    os << " stack allocated memory";
+  os << " for the \"control\" value.  Using such transient memory for "
+  "the control value is potentially dangerous.";
+  if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
+    os << "  Perhaps you intended to declare the variable as 'static'?";
+
+  LazyInitialize(BT_pthreadOnce, "Improper use of 'pthread_once'");
+
+  BugReport *report = new BugReport(*BT_pthreadOnce, os.str(), N);
+  report->addRange(CE->getArg(0)->getSourceRange());
+  C.emitReport(report);
+}
+
+//===----------------------------------------------------------------------===//
+// "calloc", "malloc", "realloc", "reallocf", "alloca" and "valloc"
+// with allocation size 0
+//===----------------------------------------------------------------------===//
+// FIXME: Eventually these should be rolled into the MallocChecker, but right now
+// they're more basic and valuable for widespread use.
+
+// Returns true if we try to do a zero byte allocation, false otherwise.
+// Fills in trueState and falseState.
+static bool IsZeroByteAllocation(ProgramStateRef state,
+                                const SVal argVal,
+                                ProgramStateRef *trueState,
+                                ProgramStateRef *falseState) {
+  llvm::tie(*trueState, *falseState) =
+    state->assume(argVal.castAs<DefinedSVal>());
+  
+  return (*falseState && !*trueState);
+}
+
+// Generates an error report, indicating that the function whose name is given
+// will perform a zero-byte allocation.
+// Returns false if an error occurred, true otherwise.
+bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C,
+                                              ProgramStateRef falseState,
+                                              const Expr *arg,
+                                              const char *fn_name) const {
+  ExplodedNode *N = C.generateSink(falseState);
+  if (!N)
+    return false;
+
+  LazyInitialize(BT_mallocZero,
+    "Undefined allocation of 0 bytes (CERT MEM04-C; CWE-131)");
+
+  SmallString<256> S;
+  llvm::raw_svector_ostream os(S);    
+  os << "Call to '" << fn_name << "' has an allocation size of 0 bytes";
+  BugReport *report = new BugReport(*BT_mallocZero, os.str(), N);
+
+  report->addRange(arg->getSourceRange());
+  bugreporter::trackNullOrUndefValue(N, arg, *report);
+  C.emitReport(report);
+
+  return true;
+}
+
+// Does a basic check for 0-sized allocations suitable for most of the below
+// functions (modulo "calloc")
+void UnixAPIChecker::BasicAllocationCheck(CheckerContext &C,
+                                          const CallExpr *CE,
+                                          const unsigned numArgs,
+                                          const unsigned sizeArg,
+                                          const char *fn) const {
+  // Sanity check for the correct number of arguments
+  if (CE->getNumArgs() != numArgs)
+    return;
+
+  // Check if the allocation size is 0.
+  ProgramStateRef state = C.getState();
+  ProgramStateRef trueState = NULL, falseState = NULL;
+  const Expr *arg = CE->getArg(sizeArg);
+  SVal argVal = state->getSVal(arg, C.getLocationContext());
+
+  if (argVal.isUnknownOrUndef())
+    return;
+
+  // Is the value perfectly constrained to zero?
+  if (IsZeroByteAllocation(state, argVal, &trueState, &falseState)) {
+    (void) ReportZeroByteAllocation(C, falseState, arg, fn); 
+    return;
+  }
+  // Assume the value is non-zero going forward.
+  assert(trueState);
+  if (trueState != state)
+    C.addTransition(trueState);                           
+}
+
+void UnixAPIChecker::CheckCallocZero(CheckerContext &C,
+                                     const CallExpr *CE) const {
+  unsigned int nArgs = CE->getNumArgs();
+  if (nArgs != 2)
+    return;
+
+  ProgramStateRef state = C.getState();
+  ProgramStateRef trueState = NULL, falseState = NULL;
+
+  unsigned int i;
+  for (i = 0; i < nArgs; i++) {
+    const Expr *arg = CE->getArg(i);
+    SVal argVal = state->getSVal(arg, C.getLocationContext());
+    if (argVal.isUnknownOrUndef()) {
+      if (i == 0)
+        continue;
+      else
+        return;
+    }
+
+    if (IsZeroByteAllocation(state, argVal, &trueState, &falseState)) {
+      if (ReportZeroByteAllocation(C, falseState, arg, "calloc"))
+        return;
+      else if (i == 0)
+        continue;
+      else
+        return;
+    }
+  }
+
+  // Assume the value is non-zero going forward.
+  assert(trueState);
+  if (trueState != state)
+    C.addTransition(trueState);
+}
+
+void UnixAPIChecker::CheckMallocZero(CheckerContext &C,
+                                     const CallExpr *CE) const {
+  BasicAllocationCheck(C, CE, 1, 0, "malloc");
+}
+
+void UnixAPIChecker::CheckReallocZero(CheckerContext &C,
+                                      const CallExpr *CE) const {
+  BasicAllocationCheck(C, CE, 2, 1, "realloc");
+}
+
+void UnixAPIChecker::CheckReallocfZero(CheckerContext &C,
+                                       const CallExpr *CE) const {
+  BasicAllocationCheck(C, CE, 2, 1, "reallocf");
+}
+
+void UnixAPIChecker::CheckAllocaZero(CheckerContext &C,
+                                     const CallExpr *CE) const {
+  BasicAllocationCheck(C, CE, 1, 0, "alloca");
+}
+
+void UnixAPIChecker::CheckVallocZero(CheckerContext &C,
+                                     const CallExpr *CE) const {
+  BasicAllocationCheck(C, CE, 1, 0, "valloc");
+}
+
+
+//===----------------------------------------------------------------------===//
+// Central dispatch function.
+//===----------------------------------------------------------------------===//
+
+void UnixAPIChecker::checkPreStmt(const CallExpr *CE,
+                                  CheckerContext &C) const {
+  const FunctionDecl *FD = C.getCalleeDecl(CE);
+  if (!FD || FD->getKind() != Decl::Function)
+    return;
+
+  StringRef FName = C.getCalleeName(FD);
+  if (FName.empty())
+    return;
+
+  SubChecker SC =
+    llvm::StringSwitch<SubChecker>(FName)
+      .Case("open", &UnixAPIChecker::CheckOpen)
+      .Case("pthread_once", &UnixAPIChecker::CheckPthreadOnce)
+      .Case("calloc", &UnixAPIChecker::CheckCallocZero)
+      .Case("malloc", &UnixAPIChecker::CheckMallocZero)
+      .Case("realloc", &UnixAPIChecker::CheckReallocZero)
+      .Case("reallocf", &UnixAPIChecker::CheckReallocfZero)
+      .Cases("alloca", "__builtin_alloca", &UnixAPIChecker::CheckAllocaZero)
+      .Case("valloc", &UnixAPIChecker::CheckVallocZero)
+      .Default(NULL);
+
+  if (SC)
+    (this->*SC)(C, CE);
+}
+
+//===----------------------------------------------------------------------===//
+// Registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerUnixAPIChecker(CheckerManager &mgr) {
+  mgr.registerChecker<UnixAPIChecker>();
+}
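
For reference only (not part of the patch): a minimal, hypothetical input that exercises the two main diagnostics implemented above. Note the 'open' check only fires on Darwin targets, where the checker hard-codes the value of O_CREAT; the zero-byte allocation check fires whenever the size argument is perfectly constrained to zero.

  /* unix-api-example.c -- hypothetical test input for `clang --analyze` */
  #include <fcntl.h>
  #include <stdlib.h>

  void bad_open(const char *path) {
    /* warning: call to 'open' requires a third argument when the
       'O_CREAT' flag is set (Darwin targets only) */
    int fd = open(path, O_CREAT | O_WRONLY);
    (void)fd;
  }

  void zero_alloc(void) {
    /* warning: call to 'malloc' has an allocation size of 0 bytes */
    void *p = malloc(0);
    free(p);
  }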
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
new file mode 100644
index 0000000..91c2ffb
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -0,0 +1,247 @@
+//==- UnreachableCodeChecker.cpp - Generalized dead code checker -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file implements a generalized unreachable code checker using a
+// path-sensitive analysis. We mark any path visited, and then walk the CFG as a
+// post-analysis to determine what was never visited.
+//
+// A similar, flow-sensitive-only check exists in Analysis/ReachableCode.cpp.
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/SmallSet.h"
+
+// The number of CFGBlock pointers we want to reserve memory for. This is used
+// once for each function we analyze.
+#define DEFAULT_CFGBLOCKS 256
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UnreachableCodeChecker : public Checker<check::EndAnalysis> {
+public:
+  void checkEndAnalysis(ExplodedGraph &G, BugReporter &B,
+                        ExprEngine &Eng) const;
+private:
+  typedef llvm::SmallSet<unsigned, DEFAULT_CFGBLOCKS> CFGBlocksSet;
+
+  static inline const Stmt *getUnreachableStmt(const CFGBlock *CB);
+  static void FindUnreachableEntryPoints(const CFGBlock *CB,
+                                         CFGBlocksSet &reachable,
+                                         CFGBlocksSet &visited);
+  static bool isInvalidPath(const CFGBlock *CB, const ParentMap &PM);
+  static inline bool isEmptyCFGBlock(const CFGBlock *CB);
+};
+}
+
+void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
+                                              BugReporter &B,
+                                              ExprEngine &Eng) const {
+  CFGBlocksSet reachable, visited;
+  
+  if (Eng.hasWorkRemaining())
+    return;
+
+  const Decl *D = 0;
+  CFG *C = 0;
+  ParentMap *PM = 0;
+  const LocationContext *LC = 0;
+  // Iterate over ExplodedGraph
+  for (ExplodedGraph::node_iterator I = G.nodes_begin(), E = G.nodes_end();
+      I != E; ++I) {
+    const ProgramPoint &P = I->getLocation();
+    LC = P.getLocationContext();
+
+    if (!D)
+      D = LC->getAnalysisDeclContext()->getDecl();
+    // Save the CFG if we don't have it already
+    if (!C)
+      C = LC->getAnalysisDeclContext()->getUnoptimizedCFG();
+    if (!PM)
+      PM = &LC->getParentMap();
+
+    if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
+      const CFGBlock *CB = BE->getBlock();
+      reachable.insert(CB->getBlockID());
+    }
+  }
+
+  // Bail out if we didn't get the CFG or the ParentMap.
+  if (!D || !C || !PM)
+    return;
+  
+  // Don't do anything for template instantiations.  Proving that code
+  // in a template instantiation is unreachable means proving that it is
+  // unreachable in all instantiations.
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+    if (FD->isTemplateInstantiation())
+      return;
+
+  // Find CFGBlocks that were not covered by any node
+  for (CFG::const_iterator I = C->begin(), E = C->end(); I != E; ++I) {
+    const CFGBlock *CB = *I;
+    // Check if the block is unreachable
+    if (reachable.count(CB->getBlockID()))
+      continue;
+
+    // Check if the block is empty (an artificial block)
+    if (isEmptyCFGBlock(CB))
+      continue;
+
+    // Find the entry points for this block
+    if (!visited.count(CB->getBlockID()))
+      FindUnreachableEntryPoints(CB, reachable, visited);
+
+    // This block may have been pruned; check if we still want to report it
+    if (reachable.count(CB->getBlockID()))
+      continue;
+
+    // Check for false positives
+    if (CB->size() > 0 && isInvalidPath(CB, *PM))
+      continue;
+
+    // It is good practice to always have a "default" label in a "switch", even
+    // if we should never get there. It can be used to detect errors, for
+    // instance. Unreachable code directly under a "default" label is therefore
+    // likely to be a false positive.
+    if (const Stmt *label = CB->getLabel())
+      if (label->getStmtClass() == Stmt::DefaultStmtClass)
+        continue;
+
+    // Special case for __builtin_unreachable.
+    // FIXME: This should be extended to include other unreachable markers,
+    // such as llvm_unreachable.
+    if (!CB->empty()) {
+      bool foundUnreachable = false;
+      for (CFGBlock::const_iterator ci = CB->begin(), ce = CB->end();
+           ci != ce; ++ci) {
+        if (Optional<CFGStmt> S = (*ci).getAs<CFGStmt>())
+          if (const CallExpr *CE = dyn_cast<CallExpr>(S->getStmt())) {
+            if (CE->isBuiltinCall() == Builtin::BI__builtin_unreachable) {
+              foundUnreachable = true;
+              break;
+            }
+          }
+      }
+      if (foundUnreachable)
+        continue;
+    }
+
+    // We found a block that wasn't covered - find the statement to report
+    SourceRange SR;
+    PathDiagnosticLocation DL;
+    SourceLocation SL;
+    if (const Stmt *S = getUnreachableStmt(CB)) {
+      SR = S->getSourceRange();
+      DL = PathDiagnosticLocation::createBegin(S, B.getSourceManager(), LC);
+      SL = DL.asLocation();
+      if (SR.isInvalid() || !SL.isValid())
+        continue;
+    }
+    else
+      continue;
+
+    // Check if the SourceLocation is in a system header
+    const SourceManager &SM = B.getSourceManager();
+    if (SM.isInSystemHeader(SL) || SM.isInExternCSystemHeader(SL))
+      continue;
+
+    B.EmitBasicReport(D, "Unreachable code", "Dead code",
+                      "This statement is never executed", DL, SR);
+  }
+}
+
+// Recursively finds the entry point(s) for this dead CFGBlock.
+void UnreachableCodeChecker::FindUnreachableEntryPoints(const CFGBlock *CB,
+                                                        CFGBlocksSet &reachable,
+                                                        CFGBlocksSet &visited) {
+  visited.insert(CB->getBlockID());
+
+  for (CFGBlock::const_pred_iterator I = CB->pred_begin(), E = CB->pred_end();
+      I != E; ++I) {
+    if (!reachable.count((*I)->getBlockID())) {
+      // If we find an unreachable predecessor, mark this block as reachable so
+      // we don't report this block
+      reachable.insert(CB->getBlockID());
+      if (!visited.count((*I)->getBlockID()))
+        // If we haven't previously visited the unreachable predecessor, recurse
+        FindUnreachableEntryPoints(*I, reachable, visited);
+    }
+  }
+}
+
+// Find the Stmt* in a CFGBlock for reporting a warning
+const Stmt *UnreachableCodeChecker::getUnreachableStmt(const CFGBlock *CB) {
+  for (CFGBlock::const_iterator I = CB->begin(), E = CB->end(); I != E; ++I) {
+    if (Optional<CFGStmt> S = I->getAs<CFGStmt>())
+      return S->getStmt();
+  }
+  if (const Stmt *S = CB->getTerminator())
+    return S;
+  else
+    return 0;
+}
+
+// Determines if the path to this CFGBlock contained an element that implies
+// this block is a false positive. We assume that FindUnreachableEntryPoints
+// has already marked only the entry points to any dead code, so we need only
+// find the condition that led to this block (the predecessor of this block).
+// There will never be more than one predecessor.
+bool UnreachableCodeChecker::isInvalidPath(const CFGBlock *CB,
+                                           const ParentMap &PM) {
+  // We only expect a predecessor size of 0 or 1. If it is >1, then an external
+  // condition has broken our assumption (for example, a sink being placed by
+  // another check). In these cases, we choose not to report.
+  if (CB->pred_size() > 1)
+    return true;
+
+  // If there are no predecessors, then this block is trivially unreachable
+  if (CB->pred_size() == 0)
+    return false;
+
+  const CFGBlock *pred = *CB->pred_begin();
+
+  // Get the predecessor block's terminator condition.
+  const Stmt *cond = pred->getTerminatorCondition();
+
+  //assert(cond && "CFGBlock's predecessor has a terminator condition");
+  // The previous assertion is invalid in some cases (e.g., do/while). Reporting
+  // of these situations is left on for the moment to help triage such cases.
+  if (!cond)
+    return false;
+
+  // Run each of the checks on the conditions
+  if (containsMacro(cond) || containsEnum(cond)
+      || containsStaticLocal(cond) || containsBuiltinOffsetOf(cond)
+      || containsStmt<UnaryExprOrTypeTraitExpr>(cond))
+    return true;
+
+  return false;
+}
+
+// Returns true if the given CFGBlock is empty
+bool UnreachableCodeChecker::isEmptyCFGBlock(const CFGBlock *CB) {
+  return CB->getLabel() == 0       // No labels
+      && CB->size() == 0           // No statements
+      && CB->getTerminator() == 0; // No terminator
+}
+
+void ento::registerUnreachableCodeChecker(CheckerManager &mgr) {
+  mgr.registerChecker<UnreachableCodeChecker>();
+}
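
For reference only (not part of the patch): a hypothetical input sketching what the checker reports and what it deliberately suppresses, per the special cases above (for instance, blocks containing __builtin_unreachable are skipped).

  /* unreachable-example.c -- hypothetical test input */
  int f(int x) {
    if (x > 0)
      return 1;
    return 0;
    x = 42;                    /* reported: "This statement is never executed" */
    return x;
  }

  int g(int x) {
    if (x)
      return x;
    return 0;
    __builtin_unreachable();   /* dead, but deliberately not reported */
  }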
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
new file mode 100644
index 0000000..30aef06
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -0,0 +1,162 @@
+//=== VLASizeChecker.cpp - Variable-length array size checker --*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines VLASizeChecker, a builtin check in ExprEngine that 
+// performs checks for declaration of VLA of undefined or zero size.
+// In addition, VLASizeChecker is responsible for defining the extent
+// of the MemRegion that represents a VLA.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class VLASizeChecker : public Checker< check::PreStmt<DeclStmt> > {
+  mutable OwningPtr<BugType> BT;
+  enum VLASize_Kind { VLA_Garbage, VLA_Zero, VLA_Tainted };
+
+  void reportBug(VLASize_Kind Kind,
+                 const Expr *SizeE,
+                 ProgramStateRef State,
+                 CheckerContext &C) const;
+public:
+  void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void VLASizeChecker::reportBug(VLASize_Kind Kind,
+                               const Expr *SizeE,
+                               ProgramStateRef State,
+                               CheckerContext &C) const {
+  // Generate an error node.
+  ExplodedNode *N = C.generateSink(State);
+  if (!N)
+    return;
+
+  if (!BT)
+    BT.reset(new BuiltinBug("Dangerous variable-length array (VLA) declaration"));
+
+  SmallString<256> buf;
+  llvm::raw_svector_ostream os(buf);
+  os << "Declared variable-length array (VLA) ";
+  switch (Kind) {
+  case VLA_Garbage:
+    os << "uses a garbage value as its size";
+    break;
+  case VLA_Zero:
+    os << "has zero size";
+    break;
+  case VLA_Tainted:
+    os << "has tainted size";
+    break;
+  }
+
+  BugReport *report = new BugReport(*BT, os.str(), N);
+  report->addRange(SizeE->getSourceRange());
+  bugreporter::trackNullOrUndefValue(N, SizeE, *report);
+  C.emitReport(report);
+  return;
+}
+
+void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
+  if (!DS->isSingleDecl())
+    return;
+  
+  const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
+  if (!VD)
+    return;
+
+  ASTContext &Ctx = C.getASTContext();
+  const VariableArrayType *VLA = Ctx.getAsVariableArrayType(VD->getType());
+  if (!VLA)
+    return;
+
+  // FIXME: Handle multi-dimensional VLAs.
+  const Expr *SE = VLA->getSizeExpr();
+  ProgramStateRef state = C.getState();
+  SVal sizeV = state->getSVal(SE, C.getLocationContext());
+
+  if (sizeV.isUndef()) {
+    reportBug(VLA_Garbage, SE, state, C);
+    return;
+  }
+
+  // See if the size value is known. It can't be undefined because we would have
+  // warned about that already.
+  if (sizeV.isUnknown())
+    return;
+  
+  // Check if the size is tainted.
+  if (state->isTainted(sizeV)) {
+    reportBug(VLA_Tainted, SE, 0, C);
+    return;
+  }
+
+  // Check if the size is zero.
+  DefinedSVal sizeD = sizeV.castAs<DefinedSVal>();
+
+  ProgramStateRef stateNotZero, stateZero;
+  llvm::tie(stateNotZero, stateZero) = state->assume(sizeD);
+
+  if (stateZero && !stateNotZero) {
+    reportBug(VLA_Zero, SE, stateZero, C);
+    return;
+  }
+ 
+  // From this point on, assume that the size is not zero.
+  state = stateNotZero;
+
+  // VLASizeChecker is responsible for defining the extent of the array being
+  // declared. We do this by multiplying the array length by the element size,
+  // then matching that with the array region's extent symbol.
+
+  // Convert the array length to size_t.
+  SValBuilder &svalBuilder = C.getSValBuilder();
+  QualType SizeTy = Ctx.getSizeType();
+  NonLoc ArrayLength =
+      svalBuilder.evalCast(sizeD, SizeTy, SE->getType()).castAs<NonLoc>();
+
+  // Get the element size.
+  CharUnits EleSize = Ctx.getTypeSizeInChars(VLA->getElementType());
+  SVal EleSizeVal = svalBuilder.makeIntVal(EleSize.getQuantity(), SizeTy);
+
+  // Multiply the array length by the element size.
+  SVal ArraySizeVal = svalBuilder.evalBinOpNN(
+      state, BO_Mul, ArrayLength, EleSizeVal.castAs<NonLoc>(), SizeTy);
+
+  // Finally, assume that the array's extent matches the given size.
+  const LocationContext *LC = C.getLocationContext();
+  DefinedOrUnknownSVal Extent =
+    state->getRegion(VD, LC)->getExtent(svalBuilder);
+  DefinedOrUnknownSVal ArraySize = ArraySizeVal.castAs<DefinedOrUnknownSVal>();
+  DefinedOrUnknownSVal sizeIsKnown =
+    svalBuilder.evalEQ(state, Extent, ArraySize);
+  state = state->assume(sizeIsKnown, true);
+
+  // Assume should not fail at this point.
+  assert(state);
+
+  // Remember our assumptions!
+  C.addTransition(state);
+}
+
+void ento::registerVLASizeChecker(CheckerManager &mgr) {
+  mgr.registerChecker<VLASizeChecker>();
+}
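
For reference only (not part of the patch): a hypothetical input, compiled as C (or as C++ with the GNU VLA extension), that shows the garbage-size and zero-size diagnostics reported above.

  void vla_example(int n) {
    int m;          /* never initialized */
    int a[m];       /* warning: VLA uses a garbage value as its size */
    (void)a;

    if (n == 0) {
      int b[n];     /* warning: VLA has zero size */
      (void)b;
    }
  }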
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
new file mode 100644
index 0000000..06f01ad
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -0,0 +1,242 @@
+//=======- VirtualCallChecker.cpp --------------------------------*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines a checker that checks virtual function calls during 
+//  construction or destruction of C++ objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class WalkAST : public StmtVisitor<WalkAST> {
+  BugReporter &BR;
+  AnalysisDeclContext *AC;
+
+  typedef const CallExpr * WorkListUnit;
+  typedef SmallVector<WorkListUnit, 20> DFSWorkList;
+
+  /// A vector representing the worklist which has a chain of CallExprs.
+  DFSWorkList WList;
+  
+  // PreVisited : A CallExpr to this FunctionDecl is in the worklist, but the
+  // body has not been visited yet.
+  // PostVisited : A CallExpr to this FunctionDecl is in the worklist, and the
+  // body has been visited.
+  enum Kind { NotVisited,
+              PreVisited,  /**< A CallExpr to this FunctionDecl is in the 
+                                worklist, but the body has not yet been
+                                visited. */
+              PostVisited  /**< A CallExpr to this FunctionDecl is in the
+                                worklist, and the body has been visited. */
+  };
+
+  /// A DenseMap that records visited states of FunctionDecls.
+  llvm::DenseMap<const FunctionDecl *, Kind> VisitedFunctions;
+
+  /// The CallExpr whose body is currently being visited.  This is used for
+  /// generating bug reports.  This is null while visiting the body of a
+  /// constructor or destructor.
+  const CallExpr *visitingCallExpr;
+  
+public:
+  WalkAST(BugReporter &br, AnalysisDeclContext *ac)
+    : BR(br),
+      AC(ac),
+      visitingCallExpr(0) {}
+  
+  bool hasWork() const { return !WList.empty(); }
+
+  /// This method adds a CallExpr to the worklist and marks the callee as
+  /// being PreVisited.
+  void Enqueue(WorkListUnit WLUnit) {
+    const FunctionDecl *FD = WLUnit->getDirectCallee();
+    if (!FD || !FD->getBody())
+      return;    
+    Kind &K = VisitedFunctions[FD];
+    if (K != NotVisited)
+      return;
+    K = PreVisited;
+    WList.push_back(WLUnit);
+  }
+
+  /// This method returns an item from the worklist without removing it.
+  WorkListUnit Dequeue() {
+    assert(!WList.empty());
+    return WList.back();    
+  }
+  
+  void Execute() {
+    while (hasWork()) {
+      WorkListUnit WLUnit = Dequeue();
+      const FunctionDecl *FD = WLUnit->getDirectCallee();
+      assert(FD && FD->getBody());
+
+      if (VisitedFunctions[FD] == PreVisited) {
+        // If the callee is PreVisited, walk its body.
+        // Visit the body.
+        SaveAndRestore<const CallExpr *> SaveCall(visitingCallExpr, WLUnit);
+        Visit(FD->getBody());
+        
+        // Mark the function as being PostVisited to indicate we have
+        // scanned the body.
+        VisitedFunctions[FD] = PostVisited;
+        continue;
+      }
+
+      // Otherwise, the callee is PostVisited.
+      // Remove it from the worklist.
+      assert(VisitedFunctions[FD] == PostVisited);
+      WList.pop_back();
+    }
+  }
+
+  // Stmt visitor methods.
+  void VisitCallExpr(CallExpr *CE);
+  void VisitCXXMemberCallExpr(CallExpr *CE);
+  void VisitStmt(Stmt *S) { VisitChildren(S); }
+  void VisitChildren(Stmt *S);
+  
+  void ReportVirtualCall(const CallExpr *CE, bool isPure);
+
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// AST walking.
+//===----------------------------------------------------------------------===//
+
+void WalkAST::VisitChildren(Stmt *S) {
+  for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+    if (Stmt *child = *I)
+      Visit(child);
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+  VisitChildren(CE);
+  Enqueue(CE);
+}
+
+void WalkAST::VisitCXXMemberCallExpr(CallExpr *CE) {
+  VisitChildren(CE);
+  bool callIsNonVirtual = false;
+  
+  // Several situations to elide for checking.
+  if (MemberExpr *CME = dyn_cast<MemberExpr>(CE->getCallee())) {
+    // If the member access is fully qualified (i.e., X::F), then treat
+    // this as a non-virtual call and do not warn.
+    if (CME->getQualifier())
+      callIsNonVirtual = true;
+
+    // Elide analyzing the call entirely if the base pointer is not 'this'.
+    if (Expr *base = CME->getBase()->IgnoreImpCasts())
+      if (!isa<CXXThisExpr>(base))
+        return;
+  }
+
+  // Get the callee.
+  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(CE->getDirectCallee());
+  if (MD && MD->isVirtual() && !callIsNonVirtual)
+    ReportVirtualCall(CE, MD->isPure());
+
+  Enqueue(CE);
+}
+
+void WalkAST::ReportVirtualCall(const CallExpr *CE, bool isPure) {
+  SmallString<100> buf;
+  llvm::raw_svector_ostream os(buf);
+  
+  os << "Call Path : ";
+  // Name of the CallExpr currently being visited.
+  os << *CE->getDirectCallee();
+
+  // Name of the CallExpr whose body is currently being walked.
+  if (visitingCallExpr)
+    os << " <-- " << *visitingCallExpr->getDirectCallee();
+  // Names of FunctionDecls in worklist with state PostVisited.
+  for (SmallVectorImpl<const CallExpr *>::iterator I = WList.end(),
+         E = WList.begin(); I != E; --I) {
+    const FunctionDecl *FD = (*(I-1))->getDirectCallee();
+    assert(FD);
+    if (VisitedFunctions[FD] == PostVisited)
+      os << " <-- " << *FD;
+  }
+
+  PathDiagnosticLocation CELoc =
+    PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+  SourceRange R = CE->getCallee()->getSourceRange();
+  
+  if (isPure) {
+    os << "\n" <<  "Call pure virtual functions during construction or "
+       << "destruction may leads undefined behaviour";
+    BR.EmitBasicReport(AC->getDecl(),
+                       "Call pure virtual function during construction or "
+                       "Destruction",
+                       "Cplusplus",
+                       os.str(), CELoc, &R, 1);
+    return;
+  }
+  else {
+    os << "\n" << "Call virtual functions during construction or "
+       << "destruction will never go to a more derived class";
+    BR.EmitBasicReport(AC->getDecl(),
+                       "Call virtual function during construction or "
+                       "Destruction",
+                       "Cplusplus",
+                       os.str(), CELoc, &R, 1);
+    return;
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// VirtualCallChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VirtualCallChecker : public Checker<check::ASTDecl<CXXRecordDecl> > {
+public:
+  void checkASTDecl(const CXXRecordDecl *RD, AnalysisManager& mgr,
+                    BugReporter &BR) const {
+    WalkAST walker(BR, mgr.getAnalysisDeclContext(RD));
+
+    // Check the constructors.
+    for (CXXRecordDecl::ctor_iterator I = RD->ctor_begin(), E = RD->ctor_end();
+         I != E; ++I) {
+      if (!I->isCopyOrMoveConstructor())
+        if (Stmt *Body = I->getBody()) {
+          walker.Visit(Body);
+          walker.Execute();
+        }
+    }
+
+    // Check the destructor.
+    if (CXXDestructorDecl *DD = RD->getDestructor())
+      if (Stmt *Body = DD->getBody()) {
+        walker.Visit(Body);
+        walker.Execute();
+      }
+  }
+};
+}
+
+void ento::registerVirtualCallChecker(CheckerManager &mgr) {
+  mgr.registerChecker<VirtualCallChecker>();
+}
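
For reference only (not part of the patch): a hypothetical class whose constructor and destructor trigger the reports built in ReportVirtualCall above.

  struct Shape {
    Shape() {
      draw();      // warning: call to pure virtual function during construction
      resize();    // warning: call to virtual function during construction
    }
    virtual ~Shape() {
      resize();    // warning: call to virtual function during destruction
    }
    virtual void draw() = 0;
    virtual void resize();
  };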
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/APSIntType.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/APSIntType.cpp
new file mode 100644
index 0000000..c7e9526
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/APSIntType.cpp
@@ -0,0 +1,49 @@
+//===--- APSIntType.cpp - Simple record of the type of APSInts ------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+
+using namespace clang;
+using namespace ento;
+
+APSIntType::RangeTestResultKind
+APSIntType::testInRange(const llvm::APSInt &Value,
+                        bool AllowSignConversions) const {
+
+  // Negative numbers cannot be losslessly converted to unsigned type.
+  if (IsUnsigned && !AllowSignConversions &&
+      Value.isSigned() && Value.isNegative())
+    return RTR_Below;
+
+  unsigned MinBits;
+  if (AllowSignConversions) {
+    if (Value.isSigned() && !IsUnsigned)
+      MinBits = Value.getMinSignedBits();
+    else
+      MinBits = Value.getActiveBits();
+
+  } else {
+    // Signed integers can be converted to signed integers of the same width
+    // or (if positive) unsigned integers with one fewer bit.
+    // Unsigned integers can be converted to unsigned integers of the same width
+    // or signed integers with one more bit.
+    if (Value.isSigned())
+      MinBits = Value.getMinSignedBits() - IsUnsigned;
+    else
+      MinBits = Value.getActiveBits() + !IsUnsigned;
+  }
+
+  if (MinBits <= BitWidth)
+    return RTR_Within;
+
+  if (Value.isSigned() && Value.isNegative())
+    return RTR_Below;
+  else
+    return RTR_Above;
+}
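
For reference only (not part of the patch): a rough usage sketch of testInRange, showing how the AllowSignConversions flag changes the verdict. The widths and values are arbitrary.

  #include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
  #include "llvm/ADT/APSInt.h"
  #include <cassert>

  void apsinttype_sketch() {
    using clang::ento::APSIntType;
    APSIntType SChar(8, /*Unsigned=*/false);
    APSIntType UChar(8, /*Unsigned=*/true);

    llvm::APSInt TwoHundred(llvm::APInt(32, 200), /*isUnsigned=*/true);
    llvm::APSInt MinusOne(llvm::APInt(32, -1, /*isSigned=*/true),
                          /*isUnsigned=*/false);

    // 200 needs 9 bits as a signed quantity, so it only fits in an 8-bit
    // signed type if sign conversions are allowed (it then uses all 8 bits).
    assert(SChar.testInRange(TwoHundred, false) == APSIntType::RTR_Above);
    assert(SChar.testInRange(TwoHundred, true) == APSIntType::RTR_Within);

    // A negative value never converts losslessly to an unsigned type unless
    // sign conversions are explicitly allowed.
    assert(UChar.testInRange(MinusOne, false) == APSIntType::RTR_Below);
  }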
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
new file mode 100644
index 0000000..747b73c
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -0,0 +1,55 @@
+//===-- AnalysisManager.cpp -------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+
+using namespace clang;
+using namespace ento;
+
+void AnalysisManager::anchor() { }
+
+AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
+                                 const LangOptions &lang,
+                                 const PathDiagnosticConsumers &PDC,
+                                 StoreManagerCreator storemgr,
+                                 ConstraintManagerCreator constraintmgr, 
+                                 CheckerManager *checkerMgr,
+                                 AnalyzerOptions &Options)
+  : AnaCtxMgr(Options.UnoptimizedCFG,
+              /*AddImplicitDtors=*/true,
+              /*AddInitializers=*/true,
+              Options.includeTemporaryDtorsInCFG(),
+              Options.shouldSynthesizeBodies(),
+              Options.shouldConditionalizeStaticInitializers()),
+    Ctx(ctx),
+    Diags(diags),
+    LangOpts(lang),
+    PathConsumers(PDC),
+    CreateStoreMgr(storemgr), CreateConstraintMgr(constraintmgr),
+    CheckerMgr(checkerMgr),
+    options(Options) {
+  AnaCtxMgr.getCFGBuildOptions().setAllAlwaysAdd();
+}
+
+AnalysisManager::~AnalysisManager() {
+  FlushDiagnostics();
+  for (PathDiagnosticConsumers::iterator I = PathConsumers.begin(),
+       E = PathConsumers.end(); I != E; ++I) {
+    delete *I;
+  }
+}
+
+void AnalysisManager::FlushDiagnostics() {
+  PathDiagnosticConsumer::FilesMade filesMade;
+  for (PathDiagnosticConsumers::iterator I = PathConsumers.begin(),
+       E = PathConsumers.end();
+       I != E; ++I) {
+    (*I)->FlushDiagnostics(&filesMade);
+  }
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
new file mode 100644
index 0000000..ae70739
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -0,0 +1,256 @@
+//===-- AnalyzerOptions.cpp - Analysis Engine Options -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains special accessors for analyzer configuration options
+// with string representations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace llvm;
+
+AnalyzerOptions::UserModeKind AnalyzerOptions::getUserMode() {
+  if (UserMode == UMK_NotSet) {
+    StringRef ModeStr(Config.GetOrCreateValue("mode", "deep").getValue());
+    UserMode = llvm::StringSwitch<UserModeKind>(ModeStr)
+      .Case("shallow", UMK_Shallow)
+      .Case("deep", UMK_Deep)
+      .Default(UMK_NotSet);
+    assert(UserMode != UMK_NotSet && "User mode is invalid.");
+  }
+  return UserMode;
+}
+
+IPAKind AnalyzerOptions::getIPAMode() {
+  if (IPAMode == IPAK_NotSet) {
+
+    // Use the User Mode to set the default IPA value.
+    // Note, we have to add the string to the Config map for the ConfigDumper
+    // checker to function properly.
+    const char *DefaultIPA = 0;
+    UserModeKind HighLevelMode = getUserMode();
+    if (HighLevelMode == UMK_Shallow)
+      DefaultIPA = "inlining";
+    else if (HighLevelMode == UMK_Deep)
+      DefaultIPA = "dynamic-bifurcate";
+    assert(DefaultIPA);
+
+    // Lookup the ipa configuration option, use the default from User Mode.
+    StringRef ModeStr(Config.GetOrCreateValue("ipa", DefaultIPA).getValue());
+    IPAKind IPAConfig = llvm::StringSwitch<IPAKind>(ModeStr)
+            .Case("none", IPAK_None)
+            .Case("basic-inlining", IPAK_BasicInlining)
+            .Case("inlining", IPAK_Inlining)
+            .Case("dynamic", IPAK_DynamicDispatch)
+            .Case("dynamic-bifurcate", IPAK_DynamicDispatchBifurcate)
+            .Default(IPAK_NotSet);
+    assert(IPAConfig != IPAK_NotSet && "IPA Mode is invalid.");
+
+    // Set the member variable.
+    IPAMode = IPAConfig;
+  }
+  
+  return IPAMode;
+}
+
+bool
+AnalyzerOptions::mayInlineCXXMemberFunction(CXXInlineableMemberKind K) {
+  if (getIPAMode() < IPAK_Inlining)
+    return false;
+
+  if (!CXXMemberInliningMode) {
+    static const char *ModeKey = "c++-inlining";
+    
+    StringRef ModeStr(Config.GetOrCreateValue(ModeKey,
+                                              "destructors").getValue());
+
+    CXXInlineableMemberKind &MutableMode =
+      const_cast<CXXInlineableMemberKind &>(CXXMemberInliningMode);
+
+    MutableMode = llvm::StringSwitch<CXXInlineableMemberKind>(ModeStr)
+      .Case("constructors", CIMK_Constructors)
+      .Case("destructors", CIMK_Destructors)
+      .Case("none", CIMK_None)
+      .Case("methods", CIMK_MemberFunctions)
+      .Default(CXXInlineableMemberKind());
+
+    if (!MutableMode) {
+      // FIXME: We should emit a warning here about an unknown inlining kind,
+      // but the AnalyzerOptions doesn't have access to a diagnostic engine.
+      MutableMode = CIMK_None;
+    }
+  }
+
+  return CXXMemberInliningMode >= K;
+}
+
+static StringRef toString(bool b) { return b ? "true" : "false"; }
+
+bool AnalyzerOptions::getBooleanOption(StringRef Name, bool DefaultVal) {
+  // FIXME: We should emit a warning here if the value is something other than
+  // "true", "false", or the empty string (meaning the default value),
+  // but the AnalyzerOptions doesn't have access to a diagnostic engine.
+  StringRef V(Config.GetOrCreateValue(Name, toString(DefaultVal)).getValue());
+  return llvm::StringSwitch<bool>(V)
+      .Case("true", true)
+      .Case("false", false)
+      .Default(DefaultVal);
+}
+
+bool AnalyzerOptions::getBooleanOption(Optional<bool> &V, StringRef Name,
+                                       bool DefaultVal) {
+  if (!V.hasValue())
+    V = getBooleanOption(Name, DefaultVal);
+  return V.getValue();
+}
+
+bool AnalyzerOptions::includeTemporaryDtorsInCFG() {
+  return getBooleanOption(IncludeTemporaryDtorsInCFG,
+                          "cfg-temporary-dtors",
+                          /* Default = */ false);
+}
+
+bool AnalyzerOptions::mayInlineCXXStandardLibrary() {
+  return getBooleanOption(InlineCXXStandardLibrary,
+                          "c++-stdlib-inlining",
+                          /*Default=*/true);
+}
+
+bool AnalyzerOptions::mayInlineTemplateFunctions() {
+  return getBooleanOption(InlineTemplateFunctions,
+                          "c++-template-inlining",
+                          /*Default=*/true);
+}
+
+bool AnalyzerOptions::mayInlineCXXContainerCtorsAndDtors() {
+  return getBooleanOption(InlineCXXContainerCtorsAndDtors,
+                          "c++-container-inlining",
+                          /*Default=*/false);
+}
+
+
+bool AnalyzerOptions::mayInlineObjCMethod() {
+  return getBooleanOption(ObjCInliningMode,
+                          "objc-inlining",
+                          /* Default = */ true);
+}
+
+bool AnalyzerOptions::shouldSuppressNullReturnPaths() {
+  return getBooleanOption(SuppressNullReturnPaths,
+                          "suppress-null-return-paths",
+                          /* Default = */ true);
+}
+
+bool AnalyzerOptions::shouldAvoidSuppressingNullArgumentPaths() {
+  return getBooleanOption(AvoidSuppressingNullArgumentPaths,
+                          "avoid-suppressing-null-argument-paths",
+                          /* Default = */ false);
+}
+
+bool AnalyzerOptions::shouldSuppressInlinedDefensiveChecks() {
+  return getBooleanOption(SuppressInlinedDefensiveChecks,
+                          "suppress-inlined-defensive-checks",
+                          /* Default = */ true);
+}
+
+bool AnalyzerOptions::shouldSuppressFromCXXStandardLibrary() {
+  return getBooleanOption(SuppressFromCXXStandardLibrary,
+                          "suppress-c++-stdlib",
+                          /* Default = */ false);
+}
+
+int AnalyzerOptions::getOptionAsInteger(StringRef Name, int DefaultVal) {
+  SmallString<10> StrBuf;
+  llvm::raw_svector_ostream OS(StrBuf);
+  OS << DefaultVal;
+  
+  StringRef V(Config.GetOrCreateValue(Name, OS.str()).getValue());
+  int Res = DefaultVal;
+  bool b = V.getAsInteger(10, Res);
+  assert(!b && "analyzer-config option should be numeric");
+  (void) b;
+  return Res;
+}
+
+unsigned AnalyzerOptions::getAlwaysInlineSize() {
+  if (!AlwaysInlineSize.hasValue())
+    AlwaysInlineSize = getOptionAsInteger("ipa-always-inline-size", 3);
+  return AlwaysInlineSize.getValue();
+}
+
+unsigned AnalyzerOptions::getMaxInlinableSize() {
+  if (!MaxInlinableSize.hasValue()) {
+
+    int DefaultValue = 0;
+    UserModeKind HighLevelMode = getUserMode();
+    switch (HighLevelMode) {
+      default:
+        llvm_unreachable("Invalid mode.");
+      case UMK_Shallow:
+        DefaultValue = 4;
+        break;
+      case UMK_Deep:
+        DefaultValue = 50;
+        break;
+    }
+
+    MaxInlinableSize = getOptionAsInteger("max-inlinable-size", DefaultValue);
+  }
+  return MaxInlinableSize.getValue();
+}
+
+unsigned AnalyzerOptions::getGraphTrimInterval() {
+  if (!GraphTrimInterval.hasValue())
+    GraphTrimInterval = getOptionAsInteger("graph-trim-interval", 1000);
+  return GraphTrimInterval.getValue();
+}
+
+unsigned AnalyzerOptions::getMaxTimesInlineLarge() {
+  if (!MaxTimesInlineLarge.hasValue())
+    MaxTimesInlineLarge = getOptionAsInteger("max-times-inline-large", 32);
+  return MaxTimesInlineLarge.getValue();
+}
+
+unsigned AnalyzerOptions::getMaxNodesPerTopLevelFunction() {
+  if (!MaxNodesPerTopLevelFunction.hasValue()) {
+    int DefaultValue = 0;
+    UserModeKind HighLevelMode = getUserMode();
+    switch (HighLevelMode) {
+      default:
+        llvm_unreachable("Invalid mode.");
+      case UMK_Shallow:
+        DefaultValue = 75000;
+        break;
+      case UMK_Deep:
+        DefaultValue = 150000;
+        break;
+    }
+    MaxNodesPerTopLevelFunction = getOptionAsInteger("max-nodes", DefaultValue);
+  }
+  return MaxNodesPerTopLevelFunction.getValue();
+}
+
+bool AnalyzerOptions::shouldSynthesizeBodies() {
+  return getBooleanOption("faux-bodies", true);
+}
+
+bool AnalyzerOptions::shouldPrunePaths() {
+  return getBooleanOption("prune-paths", true);
+}
+
+bool AnalyzerOptions::shouldConditionalizeStaticInitializers() {
+  return getBooleanOption("cfg-conditional-static-initializers", true);
+}
+
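For reference only (not part of the patch): the string-valued options handled above are set on the command line via -analyzer-config; the keys are exactly the strings that appear in this file. The invocations would look something like:

  # driver spelling (each argument needs its own -Xclang)
  clang --analyze -Xclang -analyzer-config -Xclang mode=shallow example.c
  clang --analyze -Xclang -analyzer-config -Xclang c++-inlining=methods example.cpp

  # -cc1 spelling
  clang -cc1 -analyze -analyzer-checker=core -analyzer-config ipa=none example.c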
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
new file mode 100644
index 0000000..a6c400f
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -0,0 +1,288 @@
+//=== BasicValueFactory.cpp - Basic values for path-sensitive analysis -*- C++ -*-//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines BasicValueFactory, a class that manages the lifetime
+//  of APSInt objects and symbolic constraints used by ExprEngine
+//  and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+
+using namespace clang;
+using namespace ento;
+
+void CompoundValData::Profile(llvm::FoldingSetNodeID& ID, QualType T,
+                              llvm::ImmutableList<SVal> L) {
+  T.Profile(ID);
+  ID.AddPointer(L.getInternalPointer());
+}
+
+void LazyCompoundValData::Profile(llvm::FoldingSetNodeID& ID,
+                                  const StoreRef &store,
+                                  const TypedValueRegion *region) {
+  ID.AddPointer(store.getStore());
+  ID.AddPointer(region);
+}
+
+typedef std::pair<SVal, uintptr_t> SValData;
+typedef std::pair<SVal, SVal> SValPair;
+
+namespace llvm {
+template<> struct FoldingSetTrait<SValData> {
+  static inline void Profile(const SValData& X, llvm::FoldingSetNodeID& ID) {
+    X.first.Profile(ID);
+    ID.AddPointer( (void*) X.second);
+  }
+};
+
+template<> struct FoldingSetTrait<SValPair> {
+  static inline void Profile(const SValPair& X, llvm::FoldingSetNodeID& ID) {
+    X.first.Profile(ID);
+    X.second.Profile(ID);
+  }
+};
+}
+
+typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValData> >
+  PersistentSValsTy;
+
+typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValPair> >
+  PersistentSValPairsTy;
+
+BasicValueFactory::~BasicValueFactory() {
+  // Note that the destructor for the contents of APSIntSet is never called, so
+  // we iterate over the set and invoke the destructor for each APSInt.  This
+  // frees any auxiliary memory allocated to represent very large constants.
+  for (APSIntSetTy::iterator I=APSIntSet.begin(), E=APSIntSet.end(); I!=E; ++I)
+    I->getValue().~APSInt();
+
+  delete (PersistentSValsTy*) PersistentSVals;
+  delete (PersistentSValPairsTy*) PersistentSValPairs;
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(const llvm::APSInt& X) {
+  llvm::FoldingSetNodeID ID;
+  void *InsertPos;
+  typedef llvm::FoldingSetNodeWrapper<llvm::APSInt> FoldNodeTy;
+
+  X.Profile(ID);
+  FoldNodeTy* P = APSIntSet.FindNodeOrInsertPos(ID, InsertPos);
+
+  if (!P) {
+    P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+    new (P) FoldNodeTy(X);
+    APSIntSet.InsertNode(P, InsertPos);
+  }
+
+  return *P;
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(const llvm::APInt& X,
+                                                bool isUnsigned) {
+  llvm::APSInt V(X, isUnsigned);
+  return getValue(V);
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, unsigned BitWidth,
+                                           bool isUnsigned) {
+  llvm::APSInt V(BitWidth, isUnsigned);
+  V = X;
+  return getValue(V);
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, QualType T) {
+
+  return getValue(getAPSIntType(T).getValue(X));
+}
+
+const CompoundValData*
+BasicValueFactory::getCompoundValData(QualType T,
+                                      llvm::ImmutableList<SVal> Vals) {
+
+  llvm::FoldingSetNodeID ID;
+  CompoundValData::Profile(ID, T, Vals);
+  void *InsertPos;
+
+  CompoundValData* D = CompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+  if (!D) {
+    D = (CompoundValData*) BPAlloc.Allocate<CompoundValData>();
+    new (D) CompoundValData(T, Vals);
+    CompoundValDataSet.InsertNode(D, InsertPos);
+  }
+
+  return D;
+}
+
+const LazyCompoundValData*
+BasicValueFactory::getLazyCompoundValData(const StoreRef &store,
+                                          const TypedValueRegion *region) {
+  llvm::FoldingSetNodeID ID;
+  LazyCompoundValData::Profile(ID, store, region);
+  void *InsertPos;
+
+  LazyCompoundValData *D =
+    LazyCompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+  if (!D) {
+    D = (LazyCompoundValData*) BPAlloc.Allocate<LazyCompoundValData>();
+    new (D) LazyCompoundValData(store, region);
+    LazyCompoundValDataSet.InsertNode(D, InsertPos);
+  }
+
+  return D;
+}
+
+const llvm::APSInt*
+BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
+                             const llvm::APSInt& V1, const llvm::APSInt& V2) {
+
+  switch (Op) {
+    default:
+      assert (false && "Invalid Opcode.");
+
+    case BO_Mul:
+      return &getValue( V1 * V2 );
+
+    case BO_Div:
+      return &getValue( V1 / V2 );
+
+    case BO_Rem:
+      return &getValue( V1 % V2 );
+
+    case BO_Add:
+      return &getValue( V1 + V2 );
+
+    case BO_Sub:
+      return &getValue( V1 - V2 );
+
+    case BO_Shl: {
+
+      // FIXME: This logic should probably go higher up, where we can
+      // test these conditions symbolically.
+
+      // FIXME: Expand these checks to include all undefined behavior.
+
+      if (V2.isSigned() && V2.isNegative())
+        return NULL;
+
+      uint64_t Amt = V2.getZExtValue();
+
+      if (Amt > V1.getBitWidth())
+        return NULL;
+
+      return &getValue( V1.operator<<( (unsigned) Amt ));
+    }
+
+    case BO_Shr: {
+
+      // FIXME: This logic should probably go higher up, where we can
+      // test these conditions symbolically.
+
+      // FIXME: Expand these checks to include all undefined behavior.
+
+      if (V2.isSigned() && V2.isNegative())
+        return NULL;
+
+      uint64_t Amt = V2.getZExtValue();
+
+      if (Amt > V1.getBitWidth())
+        return NULL;
+
+      return &getValue( V1.operator>>( (unsigned) Amt ));
+    }
+
+    case BO_LT:
+      return &getTruthValue( V1 < V2 );
+
+    case BO_GT:
+      return &getTruthValue( V1 > V2 );
+
+    case BO_LE:
+      return &getTruthValue( V1 <= V2 );
+
+    case BO_GE:
+      return &getTruthValue( V1 >= V2 );
+
+    case BO_EQ:
+      return &getTruthValue( V1 == V2 );
+
+    case BO_NE:
+      return &getTruthValue( V1 != V2 );
+
+      // Note: LAnd, LOr, Comma are handled specially by higher-level logic.
+
+    case BO_And:
+      return &getValue( V1 & V2 );
+
+    case BO_Or:
+      return &getValue( V1 | V2 );
+
+    case BO_Xor:
+      return &getValue( V1 ^ V2 );
+  }
+}
+
+
+const std::pair<SVal, uintptr_t>&
+BasicValueFactory::getPersistentSValWithData(const SVal& V, uintptr_t Data) {
+
+  // Lazily create the folding set.
+  if (!PersistentSVals) PersistentSVals = new PersistentSValsTy();
+
+  llvm::FoldingSetNodeID ID;
+  void *InsertPos;
+  V.Profile(ID);
+  ID.AddPointer((void*) Data);
+
+  PersistentSValsTy& Map = *((PersistentSValsTy*) PersistentSVals);
+
+  typedef llvm::FoldingSetNodeWrapper<SValData> FoldNodeTy;
+  FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
+
+  if (!P) {
+    P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+    new (P) FoldNodeTy(std::make_pair(V, Data));
+    Map.InsertNode(P, InsertPos);
+  }
+
+  return P->getValue();
+}
+
+const std::pair<SVal, SVal>&
+BasicValueFactory::getPersistentSValPair(const SVal& V1, const SVal& V2) {
+
+  // Lazily create the folding set.
+  if (!PersistentSValPairs) PersistentSValPairs = new PersistentSValPairsTy();
+
+  llvm::FoldingSetNodeID ID;
+  void *InsertPos;
+  V1.Profile(ID);
+  V2.Profile(ID);
+
+  PersistentSValPairsTy& Map = *((PersistentSValPairsTy*) PersistentSValPairs);
+
+  typedef llvm::FoldingSetNodeWrapper<SValPair> FoldNodeTy;
+  FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
+
+  if (!P) {
+    P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+    new (P) FoldNodeTy(std::make_pair(V1, V2));
+    Map.InsertNode(P, InsertPos);
+  }
+
+  return P->getValue();
+}
+
+const SVal* BasicValueFactory::getPersistentSVal(SVal X) {
+  return &getPersistentSValWithData(X, 0).first;
+}
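
For reference only (not part of the patch): a standalone restatement of the guard evalAPSInt applies to BO_Shl and BO_Shr before folding. The shift amount must be non-negative and must not exceed the bit width of the left operand (as the FIXMEs note, shifting by exactly the bit width is still undefined and is not yet rejected).

  #include "llvm/ADT/APSInt.h"

  // Mirrors the bail-out conditions above; when this returns false the
  // result is left symbolic (evalAPSInt returns NULL).
  static bool canFoldShift(const llvm::APSInt &V1, const llvm::APSInt &V2) {
    if (V2.isSigned() && V2.isNegative())
      return false;                                // negative shift amount
    return V2.getZExtValue() <= V1.getBitWidth();  // matches 'Amt > BitWidth'
  }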
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/BlockCounter.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/BlockCounter.cpp
new file mode 100644
index 0000000..74d761e
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/BlockCounter.cpp
@@ -0,0 +1,86 @@
+//==- BlockCounter.cpp - ADT for counting block visits -----------*- C++ -*-//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines BlockCounter, an abstract data type used to count
+//  the number of times a given block has been visited along a path
+//  analyzed by CoreEngine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h"
+#include "llvm/ADT/ImmutableMap.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class CountKey {
+  const StackFrameContext *CallSite;
+  unsigned BlockID;
+
+public:
+  CountKey(const StackFrameContext *CS, unsigned ID) 
+    : CallSite(CS), BlockID(ID) {}
+
+  bool operator==(const CountKey &RHS) const {
+    return (CallSite == RHS.CallSite) && (BlockID == RHS.BlockID);
+  }
+
+  bool operator<(const CountKey &RHS) const {
+    return (CallSite == RHS.CallSite) ? (BlockID < RHS.BlockID) 
+                                      : (CallSite < RHS.CallSite);
+  }
+
+  void Profile(llvm::FoldingSetNodeID &ID) const {
+    ID.AddPointer(CallSite);
+    ID.AddInteger(BlockID);
+  }
+};
+
+}
+
+typedef llvm::ImmutableMap<CountKey, unsigned> CountMap;
+
+static inline CountMap GetMap(void *D) {
+  return CountMap(static_cast<CountMap::TreeTy*>(D));
+}
+
+static inline CountMap::Factory& GetFactory(void *F) {
+  return *static_cast<CountMap::Factory*>(F);
+}
+
+unsigned BlockCounter::getNumVisited(const StackFrameContext *CallSite, 
+                                       unsigned BlockID) const {
+  CountMap M = GetMap(Data);
+  CountMap::data_type* T = M.lookup(CountKey(CallSite, BlockID));
+  return T ? *T : 0;
+}
+
+BlockCounter::Factory::Factory(llvm::BumpPtrAllocator& Alloc) {
+  F = new CountMap::Factory(Alloc);
+}
+
+BlockCounter::Factory::~Factory() {
+  delete static_cast<CountMap::Factory*>(F);
+}
+
+BlockCounter
+BlockCounter::Factory::IncrementCount(BlockCounter BC, 
+                                        const StackFrameContext *CallSite,
+                                        unsigned BlockID) {
+  return BlockCounter(GetFactory(F).add(GetMap(BC.Data), 
+                                          CountKey(CallSite, BlockID),
+                             BC.getNumVisited(CallSite, BlockID)+1).getRoot());
+}
+
+BlockCounter
+BlockCounter::Factory::GetEmptyCounter() {
+  return BlockCounter(GetFactory(F).getEmptyMap().getRoot());
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
new file mode 100644
index 0000000..a85235c
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -0,0 +1,2917 @@
+// BugReporter.cpp - Generate PathDiagnostics for Bugs ------------*- C++ -*--//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines BugReporter, a utility class for generating
+//  PathDiagnostics.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "BugReporter"
+
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/raw_ostream.h"
+#include <queue>
+
+using namespace clang;
+using namespace ento;
+
+STATISTIC(MaxBugClassSize,
+          "The maximum number of bug reports in the same equivalence class");
+STATISTIC(MaxValidBugClassSize,
+          "The maximum number of bug reports in the same equivalence class "
+          "where at least one report is valid (not suppressed)");
+
+BugReporterVisitor::~BugReporterVisitor() {}
+
+void BugReporterContext::anchor() {}
+
+//===----------------------------------------------------------------------===//
+// Helper routines for walking the ExplodedGraph and fetching statements.
+//===----------------------------------------------------------------------===//
+
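+/// Walk backwards from \p N and return the statement attached to the nearest
+/// predecessor node that has one, or NULL if no such node exists.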
+static const Stmt *GetPreviousStmt(const ExplodedNode *N) {
+  for (N = N->getFirstPred(); N; N = N->getFirstPred())
+    if (const Stmt *S = PathDiagnosticLocation::getStmt(N))
+      return S;
+
+  return 0;
+}
+
+static inline const Stmt*
+GetCurrentOrPreviousStmt(const ExplodedNode *N) {
+  if (const Stmt *S = PathDiagnosticLocation::getStmt(N))
+    return S;
+
+  return GetPreviousStmt(N);
+}
+
+//===----------------------------------------------------------------------===//
+// Diagnostic cleanup.
+//===----------------------------------------------------------------------===//
+
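+/// If \p X and \p Y are event pieces for the same location and one was
+/// produced by ConditionBRVisitor while the other was produced by
+/// TrackConstraintBRVisitor, return the preferred (ConditionBRVisitor) piece;
+/// otherwise return NULL.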
+static PathDiagnosticEventPiece *
+eventsDescribeSameCondition(PathDiagnosticEventPiece *X,
+                            PathDiagnosticEventPiece *Y) {
+  // Prefer diagnostics that come from ConditionBRVisitor over
+  // those that came from TrackConstraintBRVisitor.
+  const void *tagPreferred = ConditionBRVisitor::getTag();
+  const void *tagLesser = TrackConstraintBRVisitor::getTag();
+  
+  if (X->getLocation() != Y->getLocation())
+    return 0;
+  
+  if (X->getTag() == tagPreferred && Y->getTag() == tagLesser)
+    return X;
+  
+  if (Y->getTag() == tagPreferred && X->getTag() == tagLesser)
+    return Y;
+  
+  return 0;
+}
+
+/// An optimization pass over PathPieces that removes redundant diagnostics
+/// generated by both ConditionBRVisitor and TrackConstraintBRVisitor.  Both
+/// BugReporterVisitors use different methods to generate diagnostics, with
+/// one capable of emitting diagnostics in some cases but not in others.  This
+/// can lead to redundant diagnostic pieces at the same point in a path.
+static void removeRedundantMsgs(PathPieces &path) {
+  unsigned N = path.size();
+  if (N < 2)
+    return;
+  // NOTE: this loop intentionally does not use an iterator.  Instead, we
+  // are streaming the path and modifying it in place.  This is done by
+  // grabbing the front, processing it, and, if we decide to keep it,
+  // appending it to the end of the path.  The entire path is processed this
+  // way.
+  for (unsigned i = 0; i < N; ++i) {
+    IntrusiveRefCntPtr<PathDiagnosticPiece> piece(path.front());
+    path.pop_front();
+    
+    switch (piece->getKind()) {
+      case clang::ento::PathDiagnosticPiece::Call:
+        removeRedundantMsgs(cast<PathDiagnosticCallPiece>(piece)->path);
+        break;
+      case clang::ento::PathDiagnosticPiece::Macro:
+        removeRedundantMsgs(cast<PathDiagnosticMacroPiece>(piece)->subPieces);
+        break;
+      case clang::ento::PathDiagnosticPiece::ControlFlow:
+        break;
+      case clang::ento::PathDiagnosticPiece::Event: {
+        if (i == N-1)
+          break;
+        
+        if (PathDiagnosticEventPiece *nextEvent =
+            dyn_cast<PathDiagnosticEventPiece>(path.front().getPtr())) {
+          PathDiagnosticEventPiece *event =
+            cast<PathDiagnosticEventPiece>(piece);
+          // Check to see if we should keep one of the two pieces.  If we
+          // come up with a preference, record which piece to keep, and consume
+          // another piece from the path.
+          if (PathDiagnosticEventPiece *pieceToKeep =
+              eventsDescribeSameCondition(event, nextEvent)) {
+            piece = pieceToKeep;
+            path.pop_front();
+            ++i;
+          }
+        }
+        break;
+      }
+    }
+    path.push_back(piece);
+  }
+}
+
+/// A map from PathDiagnosticPiece to the LocationContext of the inlined
+/// function call it represents.
+typedef llvm::DenseMap<const PathPieces *, const LocationContext *>
+        LocationContextMap;
+
+/// Recursively scan through a path and prune out calls and macros pieces
+/// that aren't needed.  Return true if afterwards the path contains
+/// "interesting stuff" which means it shouldn't be pruned from the parent path.
+static bool removeUnneededCalls(PathPieces &pieces, BugReport *R,
+                                LocationContextMap &LCM) {
+  bool containsSomethingInteresting = false;
+  const unsigned N = pieces.size();
+  
+  for (unsigned i = 0 ; i < N ; ++i) {
+    // Remove the front piece from the path.  If it is still something we
+    // want to keep once we are done, we will push it back on the end.
+    IntrusiveRefCntPtr<PathDiagnosticPiece> piece(pieces.front());
+    pieces.pop_front();
+    
+    // Throw away pieces with invalid locations. Note that we can't throw away
+    // calls just yet because they might have something interesting inside them.
+    // If so, their locations will be adjusted as necessary later.
+    if (piece->getKind() != PathDiagnosticPiece::Call &&
+        piece->getLocation().asLocation().isInvalid())
+      continue;
+
+    switch (piece->getKind()) {
+      case PathDiagnosticPiece::Call: {
+        PathDiagnosticCallPiece *call = cast<PathDiagnosticCallPiece>(piece);
+        // Check if the location context is interesting.
+        assert(LCM.count(&call->path));
+        if (R->isInteresting(LCM[&call->path])) {
+          containsSomethingInteresting = true;
+          break;
+        }
+
+        if (!removeUnneededCalls(call->path, R, LCM))
+          continue;
+        
+        containsSomethingInteresting = true;
+        break;
+      }
+      case PathDiagnosticPiece::Macro: {
+        PathDiagnosticMacroPiece *macro = cast<PathDiagnosticMacroPiece>(piece);
+        if (!removeUnneededCalls(macro->subPieces, R, LCM))
+          continue;
+        containsSomethingInteresting = true;
+        break;
+      }
+      case PathDiagnosticPiece::Event: {
+        PathDiagnosticEventPiece *event = cast<PathDiagnosticEventPiece>(piece);
+        
+        // We never throw away an event, but we do throw it away wholesale
+        // as part of a path if we throw the entire path away.
+        containsSomethingInteresting |= !event->isPrunable();
+        break;
+      }
+      case PathDiagnosticPiece::ControlFlow:
+        break;
+    }
+    
+    pieces.push_back(piece);
+  }
+  
+  return containsSomethingInteresting;
+}
+
+/// Recursively scan through a path and make sure that all call pieces have
+/// valid locations. Note that all other pieces with invalid locations should
+/// have already been pruned out.
+static void adjustCallLocations(PathPieces &Pieces,
+                                PathDiagnosticLocation *LastCallLocation = 0) {
+  for (PathPieces::iterator I = Pieces.begin(), E = Pieces.end(); I != E; ++I) {
+    PathDiagnosticCallPiece *Call = dyn_cast<PathDiagnosticCallPiece>(*I);
+
+    if (!Call) {
+      assert((*I)->getLocation().asLocation().isValid());
+      continue;
+    }
+
+    if (LastCallLocation) {
+      if (!Call->callEnter.asLocation().isValid() ||
+          Call->getCaller()->isImplicit())
+        Call->callEnter = *LastCallLocation;
+      if (!Call->callReturn.asLocation().isValid() ||
+          Call->getCaller()->isImplicit())
+        Call->callReturn = *LastCallLocation;
+    }
+
+    // Recursively clean out the subclass.  Keep this call around if
+    // it contains any informative diagnostics.
+    PathDiagnosticLocation *ThisCallLocation;
+    if (Call->callEnterWithin.asLocation().isValid() &&
+        !Call->getCallee()->isImplicit())
+      ThisCallLocation = &Call->callEnterWithin;
+    else
+      ThisCallLocation = &Call->callEnter;
+
+    assert(ThisCallLocation && "Outermost call has an invalid location");
+    adjustCallLocations(Call->path, ThisCallLocation);
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnosticBuilder and its associated routines and helper objects.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class NodeMapClosure : public BugReport::NodeResolver {
+  InterExplodedGraphMap &M;
+public:
+  NodeMapClosure(InterExplodedGraphMap &m) : M(m) {}
+
+  const ExplodedNode *getOriginalNode(const ExplodedNode *N) {
+    return M.lookup(N);
+  }
+};
+
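+// PathDiagnosticBuilder ties together the bug report, the diagnostic
+// consumer, and the node resolver, and provides the location helpers shared
+// by the path generation algorithms below.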
+class PathDiagnosticBuilder : public BugReporterContext {
+  BugReport *R;
+  PathDiagnosticConsumer *PDC;
+  NodeMapClosure NMC;
+public:
+  const LocationContext *LC;
+  
+  PathDiagnosticBuilder(GRBugReporter &br,
+                        BugReport *r, InterExplodedGraphMap &Backmap,
+                        PathDiagnosticConsumer *pdc)
+    : BugReporterContext(br),
+      R(r), PDC(pdc), NMC(Backmap), LC(r->getErrorNode()->getLocationContext())
+  {}
+
+  PathDiagnosticLocation ExecutionContinues(const ExplodedNode *N);
+
+  PathDiagnosticLocation ExecutionContinues(llvm::raw_string_ostream &os,
+                                            const ExplodedNode *N);
+
+  BugReport *getBugReport() { return R; }
+
+  Decl const &getCodeDecl() { return R->getErrorNode()->getCodeDecl(); }
+  
+  ParentMap& getParentMap() { return LC->getParentMap(); }
+
+  const Stmt *getParent(const Stmt *S) {
+    return getParentMap().getParent(S);
+  }
+
+  virtual NodeMapClosure& getNodeResolver() { return NMC; }
+
+  PathDiagnosticLocation getEnclosingStmtLocation(const Stmt *S);
+
+  PathDiagnosticConsumer::PathGenerationScheme getGenerationScheme() const {
+    return PDC ? PDC->getGenerationScheme() : PathDiagnosticConsumer::Extensive;
+  }
+
+  bool supportsLogicalOpControlFlow() const {
+    return PDC ? PDC->supportsLogicalOpControlFlow() : true;
+  }
+};
+} // end anonymous namespace
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::ExecutionContinues(const ExplodedNode *N) {
+  if (const Stmt *S = PathDiagnosticLocation::getNextStmt(N))
+    return PathDiagnosticLocation(S, getSourceManager(), LC);
+
+  return PathDiagnosticLocation::createDeclEnd(N->getLocationContext(),
+                                               getSourceManager());
+}
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::ExecutionContinues(llvm::raw_string_ostream &os,
+                                          const ExplodedNode *N) {
+
+  // Slow, but probably doesn't matter.
+  if (os.str().empty())
+    os << ' ';
+
+  const PathDiagnosticLocation &Loc = ExecutionContinues(N);
+
+  if (Loc.asStmt())
+    os << "Execution continues on line "
+       << getSourceManager().getExpansionLineNumber(Loc.asLocation())
+       << '.';
+  else {
+    os << "Execution jumps to the end of the ";
+    const Decl *D = N->getLocationContext()->getDecl();
+    if (isa<ObjCMethodDecl>(D))
+      os << "method";
+    else if (isa<FunctionDecl>(D))
+      os << "function";
+    else {
+      assert(isa<BlockDecl>(D));
+      os << "anonymous block";
+    }
+    os << '.';
+  }
+
+  return Loc;
+}
+
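+/// Return true if \p S is a consumed expression or if its parent (ignoring
+/// parentheses) is a for, do, or while statement.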
+static bool IsNested(const Stmt *S, ParentMap &PM) {
+  if (isa<Expr>(S) && PM.isConsumedExpr(cast<Expr>(S)))
+    return true;
+
+  const Stmt *Parent = PM.getParentIgnoreParens(S);
+
+  if (Parent)
+    switch (Parent->getStmtClass()) {
+      case Stmt::ForStmtClass:
+      case Stmt::DoStmtClass:
+      case Stmt::WhileStmtClass:
+        return true;
+      default:
+        break;
+    }
+
+  return false;
+}
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
+  assert(S && "Null Stmt *passed to getEnclosingStmtLocation");
+  ParentMap &P = getParentMap();
+  SourceManager &SMgr = getSourceManager();
+
+  while (IsNested(S, P)) {
+    const Stmt *Parent = P.getParentIgnoreParens(S);
+
+    if (!Parent)
+      break;
+
+    switch (Parent->getStmtClass()) {
+      case Stmt::BinaryOperatorClass: {
+        const BinaryOperator *B = cast<BinaryOperator>(Parent);
+        if (B->isLogicalOp())
+          return PathDiagnosticLocation(S, SMgr, LC);
+        break;
+      }
+      case Stmt::CompoundStmtClass:
+      case Stmt::StmtExprClass:
+        return PathDiagnosticLocation(S, SMgr, LC);
+      case Stmt::ChooseExprClass:
+        // Similar to '?': if we are referring to the condition, just have the
+        // edge point to the entire choose expression.
+        if (cast<ChooseExpr>(Parent)->getCond() == S)
+          return PathDiagnosticLocation(Parent, SMgr, LC);
+        else
+          return PathDiagnosticLocation(S, SMgr, LC);
+      case Stmt::BinaryConditionalOperatorClass:
+      case Stmt::ConditionalOperatorClass:
+        // For '?', if we are referring to the condition, just have the edge
+        // point to the entire '?' expression.
+        if (cast<AbstractConditionalOperator>(Parent)->getCond() == S)
+          return PathDiagnosticLocation(Parent, SMgr, LC);
+        else
+          return PathDiagnosticLocation(S, SMgr, LC);
+      case Stmt::DoStmtClass:
+          return PathDiagnosticLocation(S, SMgr, LC);
+      case Stmt::ForStmtClass:
+        if (cast<ForStmt>(Parent)->getBody() == S)
+          return PathDiagnosticLocation(S, SMgr, LC);
+        break;
+      case Stmt::IfStmtClass:
+        if (cast<IfStmt>(Parent)->getCond() != S)
+          return PathDiagnosticLocation(S, SMgr, LC);
+        break;
+      case Stmt::ObjCForCollectionStmtClass:
+        if (cast<ObjCForCollectionStmt>(Parent)->getBody() == S)
+          return PathDiagnosticLocation(S, SMgr, LC);
+        break;
+      case Stmt::WhileStmtClass:
+        if (cast<WhileStmt>(Parent)->getCond() != S)
+          return PathDiagnosticLocation(S, SMgr, LC);
+        break;
+      default:
+        break;
+    }
+
+    S = Parent;
+  }
+
+  assert(S && "Cannot have null Stmt for PathDiagnosticLocation");
+
+  // Special case: DeclStmts can appear in for statement declarations, in which
+  //  case the ForStmt is the context.
+  if (isa<DeclStmt>(S)) {
+    if (const Stmt *Parent = P.getParent(S)) {
+      switch (Parent->getStmtClass()) {
+        case Stmt::ForStmtClass:
+        case Stmt::ObjCForCollectionStmtClass:
+          return PathDiagnosticLocation(Parent, SMgr, LC);
+        default:
+          break;
+      }
+    }
+  }
+  else if (isa<BinaryOperator>(S)) {
+    // Special case: the binary operator represents the initialization
+    // code in a for statement (this can happen when the variable being
+    // initialized is an older variable).
+    if (const ForStmt *FS =
+          dyn_cast_or_null<ForStmt>(P.getParentIgnoreParens(S))) {
+      if (FS->getInit() == S)
+        return PathDiagnosticLocation(FS, SMgr, LC);
+    }
+  }
+
+  return PathDiagnosticLocation(S, SMgr, LC);
+}
+
+//===----------------------------------------------------------------------===//
+// "Visitors only" path diagnostic generation algorithm.
+//===----------------------------------------------------------------------===//
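+/// Run every visitor over each node pair on the path, discarding the pieces
+/// they produce, and report whether the bug report is still valid afterwards.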
+static bool GenerateVisitorsOnlyPathDiagnostic(PathDiagnostic &PD,
+                                               PathDiagnosticBuilder &PDB,
+                                               const ExplodedNode *N,
+                                      ArrayRef<BugReporterVisitor *> visitors) {
+  // All path generation skips the very first node (the error node).
+  // This is because there is special handling for the end-of-path note.
+  N = N->getFirstPred();
+  if (!N)
+    return true;
+
+  BugReport *R = PDB.getBugReport();
+  while (const ExplodedNode *Pred = N->getFirstPred()) {
+    for (ArrayRef<BugReporterVisitor *>::iterator I = visitors.begin(),
+                                                  E = visitors.end();
+         I != E; ++I) {
+      // Visit all the node pairs, but throw the path pieces away.
+      PathDiagnosticPiece *Piece = (*I)->VisitNode(N, Pred, PDB, *R);
+      delete Piece;
+    }
+
+    N = Pred;
+  }
+
+  return R->isValid();
+}
+
+//===----------------------------------------------------------------------===//
+// "Minimal" path diagnostic generation algorithm.
+//===----------------------------------------------------------------------===//
+typedef std::pair<PathDiagnosticCallPiece*, const ExplodedNode*> StackDiagPair;
+typedef SmallVector<StackDiagPair, 6> StackDiagVector;
+
+static void updateStackPiecesWithMessage(PathDiagnosticPiece *P,
+                                         StackDiagVector &CallStack) {
+  // If the piece contains a special message, add it to all the call
+  // pieces on the active stack.
+  if (PathDiagnosticEventPiece *ep =
+        dyn_cast<PathDiagnosticEventPiece>(P)) {
+
+    if (ep->hasCallStackHint())
+      for (StackDiagVector::iterator I = CallStack.begin(),
+                                     E = CallStack.end(); I != E; ++I) {
+        PathDiagnosticCallPiece *CP = I->first;
+        const ExplodedNode *N = I->second;
+        std::string stackMsg = ep->getCallStackMessage(N);
+
+        // The last message on the path to the final bug is the most important
+        // one. Since we traverse the path backwards, do not add the message
+        // if one has been previously added.
+        if  (!CP->hasCallStackMessage())
+          CP->setCallStackMessage(stackMsg);
+      }
+  }
+}
+
+static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM);
+
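+/// Walk the path from the error node backwards and build a "minimal" path
+/// diagnostic: call pieces for inlined calls, control-flow pieces for branch
+/// terminators, and any pieces contributed by the visitors.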
+static bool GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
+                                          PathDiagnosticBuilder &PDB,
+                                          const ExplodedNode *N,
+                                          LocationContextMap &LCM,
+                                      ArrayRef<BugReporterVisitor *> visitors) {
+
+  SourceManager& SMgr = PDB.getSourceManager();
+  const LocationContext *LC = PDB.LC;
+  const ExplodedNode *NextNode = N->pred_empty()
+                                        ? NULL : *(N->pred_begin());
+
+  StackDiagVector CallStack;
+
+  while (NextNode) {
+    N = NextNode;
+    PDB.LC = N->getLocationContext();
+    NextNode = N->getFirstPred();
+
+    ProgramPoint P = N->getLocation();
+
+    do {
+      if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
+        PathDiagnosticCallPiece *C =
+            PathDiagnosticCallPiece::construct(N, *CE, SMgr);
+        // Record the mapping from call piece to LocationContext.
+        LCM[&C->path] = CE->getCalleeContext();
+        PD.getActivePath().push_front(C);
+        PD.pushActivePath(&C->path);
+        CallStack.push_back(StackDiagPair(C, N));
+        break;
+      }
+
+      if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
+        // Flush all locations, and pop the active path.
+        bool VisitedEntireCall = PD.isWithinCall();
+        PD.popActivePath();
+
+        // Either we just added a bunch of stuff to the top-level path, or
+        // we have a previous CallExitEnd.  If the former, it means that the
+        // path terminated within a function call.  We must then take the
+        // current contents of the active path and place it within
+        // a new PathDiagnosticCallPiece.
+        PathDiagnosticCallPiece *C;
+        if (VisitedEntireCall) {
+          C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
+        } else {
+          const Decl *Caller = CE->getLocationContext()->getDecl();
+          C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+          // Record the mapping from call piece to LocationContext.
+          LCM[&C->path] = CE->getCalleeContext();
+        }
+
+        C->setCallee(*CE, SMgr);
+        if (!CallStack.empty()) {
+          assert(CallStack.back().first == C);
+          CallStack.pop_back();
+        }
+        break;
+      }
+
+      if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
+        const CFGBlock *Src = BE->getSrc();
+        const CFGBlock *Dst = BE->getDst();
+        const Stmt *T = Src->getTerminator();
+
+        if (!T)
+          break;
+
+        PathDiagnosticLocation Start =
+            PathDiagnosticLocation::createBegin(T, SMgr,
+                N->getLocationContext());
+
+        switch (T->getStmtClass()) {
+        default:
+          break;
+
+        case Stmt::GotoStmtClass:
+        case Stmt::IndirectGotoStmtClass: {
+          const Stmt *S = PathDiagnosticLocation::getNextStmt(N);
+
+          if (!S)
+            break;
+
+          std::string sbuf;
+          llvm::raw_string_ostream os(sbuf);
+          const PathDiagnosticLocation &End = PDB.getEnclosingStmtLocation(S);
+
+          os << "Control jumps to line "
+              << End.asLocation().getExpansionLineNumber();
+          PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+              Start, End, os.str()));
+          break;
+        }
+
+        case Stmt::SwitchStmtClass: {
+          // Figure out what case arm we took.
+          std::string sbuf;
+          llvm::raw_string_ostream os(sbuf);
+
+          if (const Stmt *S = Dst->getLabel()) {
+            PathDiagnosticLocation End(S, SMgr, LC);
+
+            switch (S->getStmtClass()) {
+            default:
+              os << "No cases match in the switch statement. "
+              "Control jumps to line "
+              << End.asLocation().getExpansionLineNumber();
+              break;
+            case Stmt::DefaultStmtClass:
+              os << "Control jumps to the 'default' case at line "
+              << End.asLocation().getExpansionLineNumber();
+              break;
+
+            case Stmt::CaseStmtClass: {
+              os << "Control jumps to 'case ";
+              const CaseStmt *Case = cast<CaseStmt>(S);
+              const Expr *LHS = Case->getLHS()->IgnoreParenCasts();
+
+              // Determine if it is an enum.
+              bool GetRawInt = true;
+
+              if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS)) {
+                // FIXME: Maybe this should be an assertion.  Are there cases
+                // where it is not an EnumConstantDecl?
+                const EnumConstantDecl *D =
+                    dyn_cast<EnumConstantDecl>(DR->getDecl());
+
+                if (D) {
+                  GetRawInt = false;
+                  os << *D;
+                }
+              }
+
+              if (GetRawInt)
+                os << LHS->EvaluateKnownConstInt(PDB.getASTContext());
+
+              os << ":'  at line "
+                  << End.asLocation().getExpansionLineNumber();
+              break;
+            }
+            }
+            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+                Start, End, os.str()));
+          }
+          else {
+            os << "'Default' branch taken. ";
+            const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N);
+            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+                Start, End, os.str()));
+          }
+
+          break;
+        }
+
+        case Stmt::BreakStmtClass:
+        case Stmt::ContinueStmtClass: {
+          std::string sbuf;
+          llvm::raw_string_ostream os(sbuf);
+          PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+          PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+              Start, End, os.str()));
+          break;
+        }
+
+        // Determine control-flow for ternary '?'.
+        case Stmt::BinaryConditionalOperatorClass:
+        case Stmt::ConditionalOperatorClass: {
+          std::string sbuf;
+          llvm::raw_string_ostream os(sbuf);
+          os << "'?' condition is ";
+
+          if (*(Src->succ_begin()+1) == Dst)
+            os << "false";
+          else
+            os << "true";
+
+          PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+          if (const Stmt *S = End.asStmt())
+            End = PDB.getEnclosingStmtLocation(S);
+
+          PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+              Start, End, os.str()));
+          break;
+        }
+
+        // Determine control-flow for short-circuited '&&' and '||'.
+        case Stmt::BinaryOperatorClass: {
+          if (!PDB.supportsLogicalOpControlFlow())
+            break;
+
+          const BinaryOperator *B = cast<BinaryOperator>(T);
+          std::string sbuf;
+          llvm::raw_string_ostream os(sbuf);
+          os << "Left side of '";
+
+          if (B->getOpcode() == BO_LAnd) {
+            os << "&&" << "' is ";
+
+            if (*(Src->succ_begin()+1) == Dst) {
+              os << "false";
+              PathDiagnosticLocation End(B->getLHS(), SMgr, LC);
+              PathDiagnosticLocation Start =
+                  PathDiagnosticLocation::createOperatorLoc(B, SMgr);
+              PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+                  Start, End, os.str()));
+            }
+            else {
+              os << "true";
+              PathDiagnosticLocation Start(B->getLHS(), SMgr, LC);
+              PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+              PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+                  Start, End, os.str()));
+            }
+          }
+          else {
+            assert(B->getOpcode() == BO_LOr);
+            os << "||" << "' is ";
+
+            if (*(Src->succ_begin()+1) == Dst) {
+              os << "false";
+              PathDiagnosticLocation Start(B->getLHS(), SMgr, LC);
+              PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+              PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+                  Start, End, os.str()));
+            }
+            else {
+              os << "true";
+              PathDiagnosticLocation End(B->getLHS(), SMgr, LC);
+              PathDiagnosticLocation Start =
+                  PathDiagnosticLocation::createOperatorLoc(B, SMgr);
+              PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+                  Start, End, os.str()));
+            }
+          }
+
+          break;
+        }
+
+        case Stmt::DoStmtClass:  {
+          if (*(Src->succ_begin()) == Dst) {
+            std::string sbuf;
+            llvm::raw_string_ostream os(sbuf);
+
+            os << "Loop condition is true. ";
+            PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+
+            if (const Stmt *S = End.asStmt())
+              End = PDB.getEnclosingStmtLocation(S);
+
+            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+                Start, End, os.str()));
+          }
+          else {
+            PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+            if (const Stmt *S = End.asStmt())
+              End = PDB.getEnclosingStmtLocation(S);
+
+            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+                Start, End, "Loop condition is false.  Exiting loop"));
+          }
+
+          break;
+        }
+
+        case Stmt::WhileStmtClass:
+        case Stmt::ForStmtClass: {
+          if (*(Src->succ_begin()+1) == Dst) {
+            std::string sbuf;
+            llvm::raw_string_ostream os(sbuf);
+
+            os << "Loop condition is false. ";
+            PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+            if (const Stmt *S = End.asStmt())
+              End = PDB.getEnclosingStmtLocation(S);
+
+            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+                Start, End, os.str()));
+          }
+          else {
+            PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+            if (const Stmt *S = End.asStmt())
+              End = PDB.getEnclosingStmtLocation(S);
+
+            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+                Start, End, "Loop condition is true.  Entering loop body"));
+          }
+
+          break;
+        }
+
+        case Stmt::IfStmtClass: {
+          PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+          if (const Stmt *S = End.asStmt())
+            End = PDB.getEnclosingStmtLocation(S);
+
+          if (*(Src->succ_begin()+1) == Dst)
+            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+                Start, End, "Taking false branch"));
+          else
+            PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(
+                Start, End, "Taking true branch"));
+
+          break;
+        }
+        }
+      }
+    } while (0);
+
+    if (NextNode) {
+      // Add diagnostic pieces from custom visitors.
+      BugReport *R = PDB.getBugReport();
+      for (ArrayRef<BugReporterVisitor *>::iterator I = visitors.begin(),
+                                                    E = visitors.end();
+           I != E; ++I) {
+        if (PathDiagnosticPiece *p = (*I)->VisitNode(N, NextNode, PDB, *R)) {
+          PD.getActivePath().push_front(p);
+          updateStackPiecesWithMessage(p, CallStack);
+        }
+      }
+    }
+  }
+
+  if (!PDB.getBugReport()->isValid())
+    return false;
+
+  // After constructing the full PathDiagnostic, do a pass over it to compact
+  // PathDiagnosticPieces that occur within a macro.
+  CompactPathDiagnostic(PD.getMutablePieces(), PDB.getSourceManager());
+  return true;
+}
+
+//===----------------------------------------------------------------------===//
+// "Extensive" PathDiagnostic generation.
+//===----------------------------------------------------------------------===//
+
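+/// Return true if \p S, after stripping parens and casts, is a conditional
+/// operator or a logical ('&&'/'||') binary operator.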
+static bool IsControlFlowExpr(const Stmt *S) {
+  const Expr *E = dyn_cast<Expr>(S);
+
+  if (!E)
+    return false;
+
+  E = E->IgnoreParenCasts();
+
+  if (isa<AbstractConditionalOperator>(E))
+    return true;
+
+  if (const BinaryOperator *B = dyn_cast<BinaryOperator>(E))
+    if (B->isLogicalOp())
+      return true;
+
+  return false;
+}
+
+namespace {
+class ContextLocation : public PathDiagnosticLocation {
+  bool IsDead;
+public:
+  ContextLocation(const PathDiagnosticLocation &L, bool isdead = false)
+    : PathDiagnosticLocation(L), IsDead(isdead) {}
+
+  void markDead() { IsDead = true; }
+  bool isDead() const { return IsDead; }
+};
+
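+// Adjust a location so that it refers to the subexpression that best
+// identifies the enclosing expression (e.g. the condition of a '?:'),
+// optionally narrowing the range to its first character.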
+static PathDiagnosticLocation cleanUpLocation(PathDiagnosticLocation L,
+                                              const LocationContext *LC,
+                                              bool firstCharOnly = false) {
+  if (const Stmt *S = L.asStmt()) {
+    const Stmt *Original = S;
+    while (1) {
+      // Adjust the location for some expressions that are best referenced
+      // by one of their subexpressions.
+      switch (S->getStmtClass()) {
+        default:
+          break;
+        case Stmt::ParenExprClass:
+        case Stmt::GenericSelectionExprClass:
+          S = cast<Expr>(S)->IgnoreParens();
+          firstCharOnly = true;
+          continue;
+        case Stmt::BinaryConditionalOperatorClass:
+        case Stmt::ConditionalOperatorClass:
+          S = cast<AbstractConditionalOperator>(S)->getCond();
+          firstCharOnly = true;
+          continue;
+        case Stmt::ChooseExprClass:
+          S = cast<ChooseExpr>(S)->getCond();
+          firstCharOnly = true;
+          continue;
+        case Stmt::BinaryOperatorClass:
+          S = cast<BinaryOperator>(S)->getLHS();
+          firstCharOnly = true;
+          continue;
+      }
+
+      break;
+    }
+
+    if (S != Original)
+      L = PathDiagnosticLocation(S, L.getManager(), LC);
+  }
+
+  if (firstCharOnly)
+    L  = PathDiagnosticLocation::createSingleLocation(L);
+  
+  return L;
+}
+
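+// EdgeBuilder incrementally adds control-flow edges to a PathDiagnostic while
+// maintaining a stack of enclosing "context" locations, so that edges are
+// emitted at the appropriate level of nesting.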
+class EdgeBuilder {
+  std::vector<ContextLocation> CLocs;
+  typedef std::vector<ContextLocation>::iterator iterator;
+  PathDiagnostic &PD;
+  PathDiagnosticBuilder &PDB;
+  PathDiagnosticLocation PrevLoc;
+
+  bool IsConsumedExpr(const PathDiagnosticLocation &L);
+
+  bool containsLocation(const PathDiagnosticLocation &Container,
+                        const PathDiagnosticLocation &Containee);
+
+  PathDiagnosticLocation getContextLocation(const PathDiagnosticLocation &L);
+
+  void popLocation() {
+    if (!CLocs.back().isDead() && CLocs.back().asLocation().isFileID()) {
+      // For contexts, we only use the first character as the range.
+      rawAddEdge(cleanUpLocation(CLocs.back(), PDB.LC, true));
+    }
+    CLocs.pop_back();
+  }
+
+public:
+  EdgeBuilder(PathDiagnostic &pd, PathDiagnosticBuilder &pdb)
+    : PD(pd), PDB(pdb) {
+
+      // If the PathDiagnostic already has pieces, add the enclosing statement
+      // of the first piece as a context as well.
+      if (!PD.path.empty()) {
+        PrevLoc = (*PD.path.begin())->getLocation();
+
+        if (const Stmt *S = PrevLoc.asStmt())
+          addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+      }
+  }
+
+  ~EdgeBuilder() {
+    while (!CLocs.empty()) popLocation();
+    
+    // Finally, add an initial edge from the start location of the first
+    // statement (if it doesn't already exist).
+    PathDiagnosticLocation L = PathDiagnosticLocation::createDeclBegin(
+                                                       PDB.LC,
+                                                       PDB.getSourceManager());
+    if (L.isValid())
+      rawAddEdge(L);
+  }
+
+  void flushLocations() {
+    while (!CLocs.empty())
+      popLocation();
+    PrevLoc = PathDiagnosticLocation();
+  }
+  
+  void addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd = false,
+               bool IsPostJump = false);
+
+  void rawAddEdge(PathDiagnosticLocation NewLoc);
+
+  void addContext(const Stmt *S);
+  void addContext(const PathDiagnosticLocation &L);
+  void addExtendedContext(const Stmt *S);
+};
+} // end anonymous namespace
+
+
+PathDiagnosticLocation
+EdgeBuilder::getContextLocation(const PathDiagnosticLocation &L) {
+  if (const Stmt *S = L.asStmt()) {
+    if (IsControlFlowExpr(S))
+      return L;
+
+    return PDB.getEnclosingStmtLocation(S);
+  }
+
+  return L;
+}
+
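+// Conservatively determine whether \p Container syntactically encloses
+// \p Containee, first by walking the ParentMap and then, failing that, by
+// comparing expansion line/column ranges.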
+bool EdgeBuilder::containsLocation(const PathDiagnosticLocation &Container,
+                                   const PathDiagnosticLocation &Containee) {
+
+  if (Container == Containee)
+    return true;
+
+  if (Container.asDecl())
+    return true;
+
+  if (const Stmt *S = Containee.asStmt())
+    if (const Stmt *ContainerS = Container.asStmt()) {
+      while (S) {
+        if (S == ContainerS)
+          return true;
+        S = PDB.getParent(S);
+      }
+      return false;
+    }
+
+  // Less accurate: compare using source ranges.
+  SourceRange ContainerR = Container.asRange();
+  SourceRange ContaineeR = Containee.asRange();
+
+  SourceManager &SM = PDB.getSourceManager();
+  SourceLocation ContainerRBeg = SM.getExpansionLoc(ContainerR.getBegin());
+  SourceLocation ContainerREnd = SM.getExpansionLoc(ContainerR.getEnd());
+  SourceLocation ContaineeRBeg = SM.getExpansionLoc(ContaineeR.getBegin());
+  SourceLocation ContaineeREnd = SM.getExpansionLoc(ContaineeR.getEnd());
+
+  unsigned ContainerBegLine = SM.getExpansionLineNumber(ContainerRBeg);
+  unsigned ContainerEndLine = SM.getExpansionLineNumber(ContainerREnd);
+  unsigned ContaineeBegLine = SM.getExpansionLineNumber(ContaineeRBeg);
+  unsigned ContaineeEndLine = SM.getExpansionLineNumber(ContaineeREnd);
+
+  assert(ContainerBegLine <= ContainerEndLine);
+  assert(ContaineeBegLine <= ContaineeEndLine);
+
+  return (ContainerBegLine <= ContaineeBegLine &&
+          ContainerEndLine >= ContaineeEndLine &&
+          (ContainerBegLine != ContaineeBegLine ||
+           SM.getExpansionColumnNumber(ContainerRBeg) <=
+           SM.getExpansionColumnNumber(ContaineeRBeg)) &&
+          (ContainerEndLine != ContaineeEndLine ||
+           SM.getExpansionColumnNumber(ContainerREnd) >=
+           SM.getExpansionColumnNumber(ContaineeREnd)));
+}
+
+void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
+  if (!PrevLoc.isValid()) {
+    PrevLoc = NewLoc;
+    return;
+  }
+
+  const PathDiagnosticLocation &NewLocClean = cleanUpLocation(NewLoc, PDB.LC);
+  const PathDiagnosticLocation &PrevLocClean = cleanUpLocation(PrevLoc, PDB.LC);
+
+  if (PrevLocClean.asLocation().isInvalid()) {
+    PrevLoc = NewLoc;
+    return;
+  }
+  
+  if (NewLocClean.asLocation() == PrevLocClean.asLocation())
+    return;
+
+  // FIXME: Ignore intra-macro edges for now.
+  if (NewLocClean.asLocation().getExpansionLoc() ==
+      PrevLocClean.asLocation().getExpansionLoc())
+    return;
+
+  PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(NewLocClean, PrevLocClean));
+  PrevLoc = NewLoc;
+}
+
+void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd,
+                          bool IsPostJump) {
+
+  if (!alwaysAdd && NewLoc.asLocation().isMacroID())
+    return;
+
+  const PathDiagnosticLocation &CLoc = getContextLocation(NewLoc);
+
+  while (!CLocs.empty()) {
+    ContextLocation &TopContextLoc = CLocs.back();
+
+    // Is the top location context the same as the one for the new location?
+    if (TopContextLoc == CLoc) {
+      if (alwaysAdd) {
+        if (IsConsumedExpr(TopContextLoc))
+          TopContextLoc.markDead();
+
+        rawAddEdge(NewLoc);
+      }
+
+      if (IsPostJump)
+        TopContextLoc.markDead();
+      return;
+    }
+
+    if (containsLocation(TopContextLoc, CLoc)) {
+      if (alwaysAdd) {
+        rawAddEdge(NewLoc);
+
+        if (IsConsumedExpr(CLoc)) {
+          CLocs.push_back(ContextLocation(CLoc, /*IsDead=*/true));
+          return;
+        }
+      }
+
+      CLocs.push_back(ContextLocation(CLoc, /*IsDead=*/IsPostJump));
+      return;
+    }
+
+    // Context does not contain the location.  Flush it.
+    popLocation();
+  }
+
+  // If we reach here, there is no enclosing context.  Just add the edge.
+  rawAddEdge(NewLoc);
+}
+
+bool EdgeBuilder::IsConsumedExpr(const PathDiagnosticLocation &L) {
+  if (const Expr *X = dyn_cast_or_null<Expr>(L.asStmt()))
+    return PDB.getParentMap().isConsumedExpr(X) && !IsControlFlowExpr(X);
+
+  return false;
+}
+
+void EdgeBuilder::addExtendedContext(const Stmt *S) {
+  if (!S)
+    return;
+
+  const Stmt *Parent = PDB.getParent(S);
+  while (Parent) {
+    if (isa<CompoundStmt>(Parent))
+      Parent = PDB.getParent(Parent);
+    else
+      break;
+  }
+
+  if (Parent) {
+    switch (Parent->getStmtClass()) {
+      case Stmt::DoStmtClass:
+      case Stmt::ObjCAtSynchronizedStmtClass:
+        addContext(Parent);
+      default:
+        break;
+    }
+  }
+
+  addContext(S);
+}
+
+void EdgeBuilder::addContext(const Stmt *S) {
+  if (!S)
+    return;
+
+  PathDiagnosticLocation L(S, PDB.getSourceManager(), PDB.LC);
+  addContext(L);
+}
+
+void EdgeBuilder::addContext(const PathDiagnosticLocation &L) {
+  while (!CLocs.empty()) {
+    const PathDiagnosticLocation &TopContextLoc = CLocs.back();
+
+    // Is the top location context the same as the one for the new location?
+    if (TopContextLoc == L)
+      return;
+
+    if (containsLocation(TopContextLoc, L)) {
+      CLocs.push_back(L);
+      return;
+    }
+
+    // Context does not contain the location.  Flush it.
+    popLocation();
+  }
+
+  CLocs.push_back(L);
+}
+
+// Cone-of-influence: support the reverse propagation of "interesting" symbols
+// and values by tracing interesting calculations backwards through evaluated
+// expressions along a path.  This is probably overly complicated, but the idea
+// is that if an expression computed an "interesting" value, the child
+// expressions are also likely to be "interesting" (which then
+// propagates to the values they in turn compute).  This reverse propagation
+// is needed to track interesting correlations across function call boundaries,
+// where formal arguments bind to actual arguments, etc.  This is also needed
+// because the constraint solver sometimes simplifies certain symbolic values
+// into constants when appropriate, and this complicates reasoning about
+// interesting values.
+typedef llvm::DenseSet<const Expr *> InterestingExprs;
+
+static void reversePropagateIntererstingSymbols(BugReport &R,
+                                                InterestingExprs &IE,
+                                                const ProgramState *State,
+                                                const Expr *Ex,
+                                                const LocationContext *LCtx) {
+  SVal V = State->getSVal(Ex, LCtx);
+  if (!(R.isInteresting(V) || IE.count(Ex)))
+    return;
+  
+  switch (Ex->getStmtClass()) {
+    default:
+      if (!isa<CastExpr>(Ex))
+        break;
+      // Fall through.
+    case Stmt::BinaryOperatorClass:
+    case Stmt::UnaryOperatorClass: {
+      for (Stmt::const_child_iterator CI = Ex->child_begin(),
+            CE = Ex->child_end();
+            CI != CE; ++CI) {
+        if (const Expr *child = dyn_cast_or_null<Expr>(*CI)) {
+          IE.insert(child);
+          SVal ChildV = State->getSVal(child, LCtx);
+          R.markInteresting(ChildV);
+        }
+        break;
+      }
+    }
+  }
+  
+  R.markInteresting(V);
+}
+
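+// At a call boundary, record the caller-side argument expressions whose
+// corresponding parameters hold interesting values, so that interest
+// propagates back into the caller.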
+static void reversePropagateInterestingSymbols(BugReport &R,
+                                               InterestingExprs &IE,
+                                               const ProgramState *State,
+                                               const LocationContext *CalleeCtx,
+                                               const LocationContext *CallerCtx)
+{
+  // FIXME: Handle non-CallExpr-based CallEvents.
+  const StackFrameContext *Callee = CalleeCtx->getCurrentStackFrame();
+  const Stmt *CallSite = Callee->getCallSite();
+  if (const CallExpr *CE = dyn_cast_or_null<CallExpr>(CallSite)) {
+    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeCtx->getDecl())) {
+      FunctionDecl::param_const_iterator PI = FD->param_begin(), 
+                                         PE = FD->param_end();
+      CallExpr::const_arg_iterator AI = CE->arg_begin(), AE = CE->arg_end();
+      for (; AI != AE && PI != PE; ++AI, ++PI) {
+        if (const Expr *ArgE = *AI) {
+          if (const ParmVarDecl *PD = *PI) {
+            Loc LV = State->getLValue(PD, CalleeCtx);
+            if (R.isInteresting(LV) || R.isInteresting(State->getRawSVal(LV)))
+              IE.insert(ArgE);
+          }
+        }
+      }
+    }
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Functions for determining if a loop was executed 0 times.
+//===----------------------------------------------------------------------===//
+
+/// Return true if the terminator is a loop and the destination is the
+/// false branch.
+static bool isLoopJumpPastBody(const Stmt *Term, const BlockEdge *BE) {
+  switch (Term->getStmtClass()) {
+    case Stmt::ForStmtClass:
+    case Stmt::WhileStmtClass:
+    case Stmt::ObjCForCollectionStmtClass:
+      break;
+    default:
+      // Note that we intentionally do not include do..while here.
+      return false;
+  }
+
+  // Did we take the false branch?
+  const CFGBlock *Src = BE->getSrc();
+  assert(Src->succ_size() == 2);
+  return (*(Src->succ_begin()+1) == BE->getDst());
+}
+
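+/// Return true if \p SubS is \p S itself or a (transitive) child of \p S
+/// according to the parent map.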
+static bool isContainedByStmt(ParentMap &PM, const Stmt *S, const Stmt *SubS) {
+  while (SubS) {
+    if (SubS == S)
+      return true;
+    SubS = PM.getParent(SubS);
+  }
+  return false;
+}
+
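+/// Walk backwards from \p N and return the most recent statement that is not
+/// part of the loop terminator \p Term, or NULL if none is found.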
+static const Stmt *getStmtBeforeCond(ParentMap &PM, const Stmt *Term,
+                                     const ExplodedNode *N) {
+  while (N) {
+    Optional<StmtPoint> SP = N->getLocation().getAs<StmtPoint>();
+    if (SP) {
+      const Stmt *S = SP->getStmt();
+      if (!isContainedByStmt(PM, Term, S))
+        return S;
+    }
+    N = N->getFirstPred();
+  }
+  return 0;
+}
+
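+/// Return true if \p S is lexically contained in the body (or, for 'for'
+/// loops, the increment expression) of the loop terminator \p Term.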
+static bool isInLoopBody(ParentMap &PM, const Stmt *S, const Stmt *Term) {
+  const Stmt *LoopBody = 0;
+  switch (Term->getStmtClass()) {
+    case Stmt::ForStmtClass: {
+      const ForStmt *FS = cast<ForStmt>(Term);
+      if (isContainedByStmt(PM, FS->getInc(), S))
+        return true;
+      LoopBody = FS->getBody();
+      break;
+    }
+    case Stmt::ObjCForCollectionStmtClass: {
+      const ObjCForCollectionStmt *FC = cast<ObjCForCollectionStmt>(Term);
+      LoopBody = FC->getBody();
+      break;
+    }
+    case Stmt::WhileStmtClass:
+      LoopBody = cast<WhileStmt>(Term)->getBody();
+      break;
+    default:
+      return false;
+  }
+  return isContainedByStmt(PM, LoopBody, S);
+}
+
+//===----------------------------------------------------------------------===//
+// Top-level logic for generating extensive path diagnostics.
+//===----------------------------------------------------------------------===//
+
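+/// Walk the path backwards and build an "extensive" diagnostic, using
+/// EdgeBuilder to add fine-grained control-flow edges between statements in
+/// addition to the call and event pieces.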
+static bool GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
+                                            PathDiagnosticBuilder &PDB,
+                                            const ExplodedNode *N,
+                                            LocationContextMap &LCM,
+                                      ArrayRef<BugReporterVisitor *> visitors) {
+  EdgeBuilder EB(PD, PDB);
+  const SourceManager& SM = PDB.getSourceManager();
+  StackDiagVector CallStack;
+  InterestingExprs IE;
+
+  const ExplodedNode *NextNode = N->pred_empty() ? NULL : *(N->pred_begin());
+  while (NextNode) {
+    N = NextNode;
+    NextNode = N->getFirstPred();
+    ProgramPoint P = N->getLocation();
+
+    do {
+      if (Optional<PostStmt> PS = P.getAs<PostStmt>()) {
+        if (const Expr *Ex = PS->getStmtAs<Expr>())
+          reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE,
+                                              N->getState().getPtr(), Ex,
+                                              N->getLocationContext());
+      }
+      
+      if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
+        const Stmt *S = CE->getCalleeContext()->getCallSite();
+        if (const Expr *Ex = dyn_cast_or_null<Expr>(S)) {
+            reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE,
+                                                N->getState().getPtr(), Ex,
+                                                N->getLocationContext());
+        }
+        
+        PathDiagnosticCallPiece *C =
+          PathDiagnosticCallPiece::construct(N, *CE, SM);
+        LCM[&C->path] = CE->getCalleeContext();
+
+        EB.addEdge(C->callReturn, /*AlwaysAdd=*/true, /*IsPostJump=*/true);
+        EB.flushLocations();
+
+        PD.getActivePath().push_front(C);
+        PD.pushActivePath(&C->path);
+        CallStack.push_back(StackDiagPair(C, N));
+        break;
+      }
+      
+      // Pop the call hierarchy if we are done walking the contents
+      // of a function call.
+      if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
+        // Add an edge to the start of the function.
+        const Decl *D = CE->getCalleeContext()->getDecl();
+        PathDiagnosticLocation pos =
+          PathDiagnosticLocation::createBegin(D, SM);
+        EB.addEdge(pos);
+        
+        // Flush all locations, and pop the active path.
+        bool VisitedEntireCall = PD.isWithinCall();
+        EB.flushLocations();
+        PD.popActivePath();
+        PDB.LC = N->getLocationContext();
+
+        // Either we just added a bunch of stuff to the top-level path, or
+        // we have a previous CallExitEnd.  If the former, it means that the
+        // path terminated within a function call.  We must then take the
+        // current contents of the active path and place it within
+        // a new PathDiagnosticCallPiece.
+        PathDiagnosticCallPiece *C;
+        if (VisitedEntireCall) {
+          C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
+        } else {
+          const Decl *Caller = CE->getLocationContext()->getDecl();
+          C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+          LCM[&C->path] = CE->getCalleeContext();
+        }
+
+        C->setCallee(*CE, SM);
+        EB.addContext(C->getLocation());
+
+        if (!CallStack.empty()) {
+          assert(CallStack.back().first == C);
+          CallStack.pop_back();
+        }
+        break;
+      }
+      
+      // Note that it is important that we update the LocationContext
+      // after looking at CallExits.  CallExit basically adds an
+      // edge in the *caller*, so we don't want to update the LocationContext
+      // too soon.
+      PDB.LC = N->getLocationContext();
+
+      // Block edges.
+      if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
+        // Does this represent entering a call?  If so, look at propagating
+        // interesting symbols across call boundaries.
+        if (NextNode) {
+          const LocationContext *CallerCtx = NextNode->getLocationContext();
+          const LocationContext *CalleeCtx = PDB.LC;
+          if (CallerCtx != CalleeCtx) {
+            reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
+                                               N->getState().getPtr(),
+                                               CalleeCtx, CallerCtx);
+          }
+        }
+       
+        // Are we jumping to the head of a loop?  Add a special diagnostic.
+        if (const Stmt *Loop = BE->getSrc()->getLoopTarget()) {
+          PathDiagnosticLocation L(Loop, SM, PDB.LC);
+          const CompoundStmt *CS = NULL;
+
+          if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
+            CS = dyn_cast<CompoundStmt>(FS->getBody());
+          else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
+            CS = dyn_cast<CompoundStmt>(WS->getBody());
+
+          PathDiagnosticEventPiece *p =
+            new PathDiagnosticEventPiece(L,
+                                        "Looping back to the head of the loop");
+          p->setPrunable(true);
+
+          EB.addEdge(p->getLocation(), true);
+          PD.getActivePath().push_front(p);
+
+          if (CS) {
+            PathDiagnosticLocation BL =
+              PathDiagnosticLocation::createEndBrace(CS, SM);
+            EB.addEdge(BL);
+          }
+        }
+
+        const CFGBlock *BSrc = BE->getSrc();
+        ParentMap &PM = PDB.getParentMap();
+
+        if (const Stmt *Term = BSrc->getTerminator()) {
+          // Are we jumping past the loop body without ever executing the
+          // loop (because the condition was false)?
+          if (isLoopJumpPastBody(Term, &*BE) &&
+              !isInLoopBody(PM,
+                            getStmtBeforeCond(PM,
+                                              BSrc->getTerminatorCondition(),
+                                              N),
+                            Term)) {
+            PathDiagnosticLocation L(Term, SM, PDB.LC);
+            PathDiagnosticEventPiece *PE =
+                new PathDiagnosticEventPiece(L, "Loop body executed 0 times");
+            PE->setPrunable(true);
+
+            EB.addEdge(PE->getLocation(), true);
+            PD.getActivePath().push_front(PE);
+          }
+
+          // In any case, add the terminator as the current statement
+          // context for control edges.
+          EB.addContext(Term);
+        }
+
+        break;
+      }
+
+      if (Optional<BlockEntrance> BE = P.getAs<BlockEntrance>()) {
+        Optional<CFGElement> First = BE->getFirstElement();
+        if (Optional<CFGStmt> S = First ? First->getAs<CFGStmt>() : None) {
+          const Stmt *stmt = S->getStmt();
+          if (IsControlFlowExpr(stmt)) {
+            // Add the proper context for '&&', '||', and '?'.
+            EB.addContext(stmt);
+          }
+          else
+            EB.addExtendedContext(PDB.getEnclosingStmtLocation(stmt).asStmt());
+        }
+        
+        break;
+      }
+
+    } while (0);
+
+    if (!NextNode)
+      continue;
+
+    // Add pieces from custom visitors.
+    BugReport *R = PDB.getBugReport();
+    for (ArrayRef<BugReporterVisitor *>::iterator I = visitors.begin(),
+                                                  E = visitors.end();
+         I != E; ++I) {
+      if (PathDiagnosticPiece *p = (*I)->VisitNode(N, NextNode, PDB, *R)) {
+        const PathDiagnosticLocation &Loc = p->getLocation();
+        EB.addEdge(Loc, true);
+        PD.getActivePath().push_front(p);
+        updateStackPiecesWithMessage(p, CallStack);
+
+        if (const Stmt *S = Loc.asStmt())
+          EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+      }
+    }
+  }
+
+  return PDB.getBugReport()->isValid();
+}
+
+/// \brief Adds a sanitized control-flow diagnostic edge to a path.
+static void addEdgeToPath(PathPieces &path,
+                          PathDiagnosticLocation &PrevLoc,
+                          PathDiagnosticLocation NewLoc,
+                          const LocationContext *LC) {
+  if (!NewLoc.isValid())
+    return;
+
+  SourceLocation NewLocL = NewLoc.asLocation();
+  if (NewLocL.isInvalid() || NewLocL.isMacroID())
+    return;
+
+  if (!PrevLoc.isValid()) {
+    PrevLoc = NewLoc;
+    return;
+  }
+
+  // FIXME: ignore intra-macro edges for now.
+  if (NewLoc.asLocation().getExpansionLoc() ==
+      PrevLoc.asLocation().getExpansionLoc())
+    return;
+
+  path.push_front(new PathDiagnosticControlFlowPiece(NewLoc,
+                                                     PrevLoc));
+  PrevLoc = NewLoc;
+}
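+
+// Usage sketch (illustrative): callers walk the ExplodedGraph backwards and
+// thread 'PrevLoc' through successive calls; for hypothetical locations L1
+// and L2:
+//   PathDiagnosticLocation Prev;                 // starts out invalid
+//   addEdgeToPath(path, Prev, L1, LC);           // only records L1 in Prev
+//   addEdgeToPath(path, Prev, L2, LC);           // adds control edge L2 -> L1
+// (The traversal is backwards, so L2 precedes L1 in program order.)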
+
+static bool
+GenerateAlternateExtensivePathDiagnostic(PathDiagnostic& PD,
+                                         PathDiagnosticBuilder &PDB,
+                                         const ExplodedNode *N,
+                                         LocationContextMap &LCM,
+                                      ArrayRef<BugReporterVisitor *> visitors) {
+
+  BugReport *report = PDB.getBugReport();
+  const SourceManager& SM = PDB.getSourceManager();
+  StackDiagVector CallStack;
+  InterestingExprs IE;
+
+  // Record the last location for a given visited stack frame.
+  llvm::DenseMap<const StackFrameContext *, PathDiagnosticLocation>
+    PrevLocMap;
+
+  const ExplodedNode *NextNode = N->getFirstPred();
+  while (NextNode) {
+    N = NextNode;
+    NextNode = N->getFirstPred();
+    ProgramPoint P = N->getLocation();
+    const LocationContext *LC = N->getLocationContext();
+    assert(!LCM[&PD.getActivePath()] || LCM[&PD.getActivePath()] == LC);
+    LCM[&PD.getActivePath()] = LC;
+    PathDiagnosticLocation &PrevLoc = PrevLocMap[LC->getCurrentStackFrame()];
+
+    do {
+      if (Optional<PostStmt> PS = P.getAs<PostStmt>()) {
+        // For expressions, make sure we propagate the
+        // interesting symbols correctly.
+        if (const Expr *Ex = PS->getStmtAs<Expr>())
+          reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE,
+                                              N->getState().getPtr(), Ex,
+                                              N->getLocationContext());
+
+        PathDiagnosticLocation L =
+          PathDiagnosticLocation(PS->getStmt(), SM, LC);
+        addEdgeToPath(PD.getActivePath(), PrevLoc, L, LC);
+        break;
+      }
+
+      // Have we encountered an exit from a function call?
+      if (Optional<CallExitEnd> CE = P.getAs<CallExitEnd>()) {
+        const Stmt *S = CE->getCalleeContext()->getCallSite();
+        // Propagate the interesting symbols accordingly.
+        if (const Expr *Ex = dyn_cast_or_null<Expr>(S)) {
+          reversePropagateIntererstingSymbols(*PDB.getBugReport(), IE,
+                                              N->getState().getPtr(), Ex,
+                                              N->getLocationContext());
+        }
+
+        // We are descending into a call (backwards).  Construct
+        // a new call piece to contain the path pieces for that call.
+        PathDiagnosticCallPiece *C =
+          PathDiagnosticCallPiece::construct(N, *CE, SM);
+
+        // Record the location context for this call piece.
+        LCM[&C->path] = CE->getCalleeContext();
+
+        // Add the edge to the return site.
+        addEdgeToPath(PD.getActivePath(), PrevLoc, C->callReturn, LC);
+
+        // Make the contents of the call the active path for now.
+        PD.pushActivePath(&C->path);
+        CallStack.push_back(StackDiagPair(C, N));
+        break;
+      }
+
+      // Have we encountered an entrance to a call?  It may be
+      // the case that we have not encountered a matching
+      // call exit before this point.  This means that the path
+      // terminated within the call itself.
+      if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
+        // Add an edge to the start of the function.
+        const Decl *D = CE->getCalleeContext()->getDecl();
+        addEdgeToPath(PD.getActivePath(), PrevLoc,
+                      PathDiagnosticLocation::createBegin(D, SM), LC);
+
+        // Did we visit an entire call?
+        bool VisitedEntireCall = PD.isWithinCall();
+        PD.popActivePath();
+
+        PathDiagnosticCallPiece *C;
+        if (VisitedEntireCall) {
+          C = cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
+        } else {
+          const Decl *Caller = CE->getLocationContext()->getDecl();
+          C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+          LCM[&C->path] = CE->getCalleeContext();
+        }
+        C->setCallee(*CE, SM);
+
+        if (!CallStack.empty()) {
+          assert(CallStack.back().first == C);
+          CallStack.pop_back();
+        }
+        break;
+      }
+
+      // Block edges.
+      if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
+        // Does this represent entering a call?  If so, look at propagating
+        // interesting symbols across call boundaries.
+        if (NextNode) {
+          const LocationContext *CallerCtx = NextNode->getLocationContext();
+          const LocationContext *CalleeCtx = PDB.LC;
+          if (CallerCtx != CalleeCtx) {
+            reversePropagateInterestingSymbols(*PDB.getBugReport(), IE,
+                                               N->getState().getPtr(),
+                                               CalleeCtx, CallerCtx);
+          }
+        }
+
+        // Are we jumping to the head of a loop?  Add a special diagnostic.
+        if (const Stmt *Loop = BE->getSrc()->getLoopTarget()) {
+          PathDiagnosticLocation L(Loop, SM, PDB.LC);
+          const CompoundStmt *CS = NULL;
+
+          if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
+            CS = dyn_cast<CompoundStmt>(FS->getBody());
+          else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
+            CS = dyn_cast<CompoundStmt>(WS->getBody());
+
+          PathDiagnosticEventPiece *p =
+            new PathDiagnosticEventPiece(L, "Looping back to the head "
+                                            "of the loop");
+          p->setPrunable(true);
+
+          addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation(), LC);
+          PD.getActivePath().push_front(p);
+
+          if (CS) {
+            addEdgeToPath(PD.getActivePath(), PrevLoc,
+                          PathDiagnosticLocation::createEndBrace(CS, SM), LC);
+          }
+        }
+        
+        const CFGBlock *BSrc = BE->getSrc();
+        ParentMap &PM = PDB.getParentMap();
+
+        if (const Stmt *Term = BSrc->getTerminator()) {
+          // Are we jumping past the loop body without ever executing the
+          // loop (because the condition was false)?
+          if (isLoopJumpPastBody(Term, &*BE) &&
+              !isInLoopBody(PM,
+                            getStmtBeforeCond(PM,
+                                              BSrc->getTerminatorCondition(),
+                                              N),
+                            Term))
+          {
+            PathDiagnosticLocation L(Term, SM, PDB.LC);
+            PathDiagnosticEventPiece *PE =
+              new PathDiagnosticEventPiece(L, "Loop body executed 0 times");
+            PE->setPrunable(true);
+            addEdgeToPath(PD.getActivePath(), PrevLoc,
+                          PE->getLocation(), LC);
+            PD.getActivePath().push_front(PE);
+          }
+        }
+        break;
+      }
+    } while (0);
+
+    if (!NextNode)
+      continue;
+
+    // Add pieces from custom visitors.
+    for (ArrayRef<BugReporterVisitor *>::iterator I = visitors.begin(),
+         E = visitors.end();
+         I != E; ++I) {
+      if (PathDiagnosticPiece *p = (*I)->VisitNode(N, NextNode, PDB, *report)) {
+        addEdgeToPath(PD.getActivePath(), PrevLoc, p->getLocation(), LC);
+        PD.getActivePath().push_front(p);
+        updateStackPiecesWithMessage(p, CallStack);
+      }
+    }
+  }
+
+  return report->isValid();
+}
+
+const Stmt *getLocStmt(PathDiagnosticLocation L) {
+  if (!L.isValid())
+    return 0;
+  return L.asStmt();
+}
+
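+// For example (illustrative): in '(x) + y', calling getStmtParent on the
+// DeclRefExpr 'x' skips the enclosing ParenExpr and yields the '+'
+// BinaryOperator.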
+const Stmt *getStmtParent(const Stmt *S, ParentMap &PM) {
+  if (!S)
+    return 0;
+  return PM.getParentIgnoreParens(S);
+}
+
+#if 0
+static bool isConditionForTerminator(const Stmt *S, const Stmt *Cond) {
+  // Note that we intentionally do not handle || and && here.
+  switch (S->getStmtClass()) {
+    case Stmt::ForStmtClass:
+      return cast<ForStmt>(S)->getCond() == Cond;
+    case Stmt::WhileStmtClass:
+      return cast<WhileStmt>(S)->getCond() == Cond;
+    case Stmt::DoStmtClass:
+      return cast<DoStmt>(S)->getCond() == Cond;
+    case Stmt::ChooseExprClass:
+      return cast<ChooseExpr>(S)->getCond() == Cond;
+    case Stmt::IndirectGotoStmtClass:
+      return cast<IndirectGotoStmt>(S)->getTarget() == Cond;
+    case Stmt::SwitchStmtClass:
+      return cast<SwitchStmt>(S)->getCond() == Cond;
+    case Stmt::BinaryConditionalOperatorClass:
+      return cast<BinaryConditionalOperator>(S)->getCond() == Cond;
+    case Stmt::ConditionalOperatorClass:
+      return cast<ConditionalOperator>(S)->getCond() == Cond;
+    case Stmt::ObjCForCollectionStmtClass:
+      return cast<ObjCForCollectionStmt>(S)->getElement() == Cond;
+    default:
+      return false;
+  }
+}
+#endif
+
+typedef llvm::DenseSet<const PathDiagnosticControlFlowPiece *>
+        ControlFlowBarrierSet;
+
+typedef llvm::DenseSet<const PathDiagnosticCallPiece *>
+        OptimizedCallsSet;
+
+static bool isBarrier(ControlFlowBarrierSet &CFBS,
+                      const PathDiagnosticControlFlowPiece *P) {
+  return CFBS.count(P);
+}
+
+static bool optimizeEdges(PathPieces &path, SourceManager &SM,
+                          ControlFlowBarrierSet &CFBS,
+                          OptimizedCallsSet &OCS,
+                          LocationContextMap &LCM) {
+  bool hasChanges = false;
+  const LocationContext *LC = LCM[&path];
+  assert(LC);
+  bool isFirst = true;
+
+  for (PathPieces::iterator I = path.begin(), E = path.end(); I != E; ) {
+    bool wasFirst = isFirst;
+    isFirst = false;
+
+    // Optimize subpaths.
+    if (PathDiagnosticCallPiece *CallI = dyn_cast<PathDiagnosticCallPiece>(*I)){
+      // Record the fact that a call has been optimized so we only do the
+      // work once.
+      if (!OCS.count(CallI)) {
+        while (optimizeEdges(CallI->path, SM, CFBS, OCS, LCM)) {}
+        OCS.insert(CallI);
+      }
+      ++I;
+      continue;
+    }
+
+    // Pattern match the current piece and its successor.
+    PathDiagnosticControlFlowPiece *PieceI =
+      dyn_cast<PathDiagnosticControlFlowPiece>(*I);
+
+    if (!PieceI) {
+      ++I;
+      continue;
+    }
+
+    ParentMap &PM = LC->getParentMap();
+    const Stmt *s1Start = getLocStmt(PieceI->getStartLocation());
+    const Stmt *s1End   = getLocStmt(PieceI->getEndLocation());
+    const Stmt *level1 = getStmtParent(s1Start, PM);
+    const Stmt *level2 = getStmtParent(s1End, PM);
+
+    if (wasFirst) {
+#if 0
+      // Apply the "first edge" case for Rule V. here.
+      if (s1Start && level1 && isConditionForTerminator(level1, s1Start)) {
+        PathDiagnosticLocation NewLoc(level2, SM, LC);
+        PieceI->setStartLocation(NewLoc);
+        CFBS.insert(PieceI);
+        return true;
+      }
+#endif
+      // Apply the "first edge" case for Rule III. here.
+      if (!isBarrier(CFBS, PieceI) &&
+          level1 && level2 && level2 == PM.getParent(level1)) {
+        path.erase(I);
+        // Since we are erasing the current edge at the start of the
+        // path, just return now so we start analyzing the start of the path
+        // again.
+        return true;
+      }
+    }
+
+    PathPieces::iterator NextI = I; ++NextI;
+    if (NextI == E)
+      break;
+
+    PathDiagnosticControlFlowPiece *PieceNextI =
+      dyn_cast<PathDiagnosticControlFlowPiece>(*NextI);
+
+    if (!PieceNextI) {
+      ++I;
+      continue;
+    }
+
+    const Stmt *s2Start = getLocStmt(PieceNextI->getStartLocation());
+    const Stmt *s2End   = getLocStmt(PieceNextI->getEndLocation());
+    const Stmt *level3 = getStmtParent(s2Start, PM);
+    const Stmt *level4 = getStmtParent(s2End, PM);
+
+    // Rule I.
+    //
+    // If we have two consecutive control edges whose end/begin locations
+    // are at the same level (e.g. statements or top-level expressions within
+    // a compound statement, or siblings share a single ancestor expression),
+    // then merge them if they have no interesting intermediate event.
+    //
+    // For example:
+    //
+    // (1.1 -> 1.2) -> (1.2 -> 1.3) becomes (1.1 -> 1.3) because the common
+    // parent is '1'.  Here 'x.y.z' represents the hierarchy of statements.
+    //
+    // NOTE: this will be limited later in cases where we add barriers
+    // to prevent this optimization.
+    //
+    if (level1 && level1 == level2 && level1 == level3 && level1 == level4) {
+      PieceI->setEndLocation(PieceNextI->getEndLocation());
+      path.erase(NextI);
+      hasChanges = true;
+      continue;
+    }
+
+    // Rule II.
+    //
+    // If we have two consecutive control edges where we descend to a
+    // subexpression and then pop back out, merge them.
+    //
+    // NOTE: this will be limited later in cases where we add barriers
+    // to prevent this optimization.
+    //
+    // For example:
+    //
+    // (1.1 -> 1.1.1) -> (1.1.1 -> 1.2) becomes (1.1 -> 1.2).
+    if (level1 && level2 &&
+        level1 == level4 &&
+        level2 == level3 && PM.getParentIgnoreParens(level2) == level1) {
+      PieceI->setEndLocation(PieceNextI->getEndLocation());
+      path.erase(NextI);
+      hasChanges = true;
+      continue;
+    }
+
+    // Rule III.
+    //
+    // Eliminate unnecessary edges where we descend to a subexpression from
+    // a statement at the same level as our parent.
+    //
+    // NOTE: this will be limited later in cases where we add barriers
+    // to prevent this optimization.
+    //
+    // For example:
+    //
+    // (1.1 -> 1.1.1) -> (1.1.1 -> X) becomes (1.1 -> X).
+    //
+    if (level1 && level2 && level1 == PM.getParentIgnoreParens(level2)) {
+      PieceI->setEndLocation(PieceNextI->getEndLocation());
+      path.erase(NextI);
+      hasChanges = true;
+      continue;
+    }
+
+    // Rule IV.
+    //
+    // Eliminate unnecessary edges where we ascend from a subexpression to
+    // a statement at the same level as our parent.
+    //
+    // NOTE: this will be limited later in cases where we add barriers
+    // to prevent this optimization.
+    //
+    // For example:
+    //
+    // (X -> 1.1.1) -> (1.1.1 -> 1.1) becomes (X -> 1.1).
+    // [first edge] (1.1.1 -> 1.1) -> eliminate
+    //
+    if (level2 && level4 && level2 == level3 && level4 == PM.getParent(level2)){
+      PieceI->setEndLocation(PieceNextI->getEndLocation());
+      path.erase(NextI);
+      hasChanges = true;
+      continue;
+    }
+#if 0
+    // Rule V.
+    //
+    // Replace terminator conditions with terminators when the condition
+    // itself has no control-flow.
+    //
+    // For example:
+    //
+    // (X -> condition) -> (condition -> Y) becomes (X -> term) -> (term -> Y)
+    // [first edge] (condition -> Y) becomes (term -> Y)
+    //
+    // This applies to 'if', 'for', 'while', 'do .. while', 'switch'...
+    //
+    if (!isBarrier(CFBS, PieceNextI) &&
+        s1End && s1End == s2Start && level2) {
+      if (isConditionForTerminator(level2, s1End)) {
+        PathDiagnosticLocation NewLoc(level2, SM, LC);
+        PieceI->setEndLocation(NewLoc);
+        PieceNextI->setStartLocation(NewLoc);
+        CFBS.insert(PieceI);
+        hasChanges = true;
+        continue;
+      }
+
+    }
+#endif
+
+    // No changes at this index?  Move to the next one.
+    ++I;
+  }
+
+  // Report whether any changes were made during this pass.
+  return hasChanges;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for BugType and subclasses.
+//===----------------------------------------------------------------------===//
+BugType::~BugType() { }
+
+void BugType::FlushReports(BugReporter &BR) {}
+
+void BuiltinBug::anchor() {}
+
+//===----------------------------------------------------------------------===//
+// Methods for BugReport and subclasses.
+//===----------------------------------------------------------------------===//
+
+void BugReport::NodeResolver::anchor() {}
+
+void BugReport::addVisitor(BugReporterVisitor* visitor) {
+  if (!visitor)
+    return;
+
+  llvm::FoldingSetNodeID ID;
+  visitor->Profile(ID);
+  void *InsertPos;
+
+  if (CallbacksSet.FindNodeOrInsertPos(ID, InsertPos)) {
+    delete visitor;
+    return;
+  }
+
+  CallbacksSet.InsertNode(visitor, InsertPos);
+  Callbacks.push_back(visitor);
+  ++ConfigurationChangeToken;
+}
+
+BugReport::~BugReport() {
+  for (visitor_iterator I = visitor_begin(), E = visitor_end(); I != E; ++I) {
+    delete *I;
+  }
+  while (!interestingSymbols.empty()) {
+    popInterestingSymbolsAndRegions();
+  }
+}
+
+const Decl *BugReport::getDeclWithIssue() const {
+  if (DeclWithIssue)
+    return DeclWithIssue;
+  
+  const ExplodedNode *N = getErrorNode();
+  if (!N)
+    return 0;
+  
+  const LocationContext *LC = N->getLocationContext();
+  return LC->getCurrentStackFrame()->getDecl();
+}
+
+void BugReport::Profile(llvm::FoldingSetNodeID& hash) const {
+  hash.AddPointer(&BT);
+  hash.AddString(Description);
+  PathDiagnosticLocation UL = getUniqueingLocation();
+  if (UL.isValid()) {
+    UL.Profile(hash);
+  } else if (Location.isValid()) {
+    Location.Profile(hash);
+  } else {
+    assert(ErrorNode);
+    hash.AddPointer(GetCurrentOrPreviousStmt(ErrorNode));
+  }
+
+  for (SmallVectorImpl<SourceRange>::const_iterator I =
+      Ranges.begin(), E = Ranges.end(); I != E; ++I) {
+    const SourceRange range = *I;
+    if (!range.isValid())
+      continue;
+    hash.AddInteger(range.getBegin().getRawEncoding());
+    hash.AddInteger(range.getEnd().getRawEncoding());
+  }
+}
+
+void BugReport::markInteresting(SymbolRef sym) {
+  if (!sym)
+    return;
+
+  // If the symbol wasn't already in our set, note a configuration change.
+  if (getInterestingSymbols().insert(sym).second)
+    ++ConfigurationChangeToken;
+
+  if (const SymbolMetadata *meta = dyn_cast<SymbolMetadata>(sym))
+    getInterestingRegions().insert(meta->getRegion());
+}
+
+void BugReport::markInteresting(const MemRegion *R) {
+  if (!R)
+    return;
+
+  // If the base region wasn't already in our set, note a configuration change.
+  R = R->getBaseRegion();
+  if (getInterestingRegions().insert(R).second)
+    ++ConfigurationChangeToken;
+
+  if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+    getInterestingSymbols().insert(SR->getSymbol());
+}
+
+void BugReport::markInteresting(SVal V) {
+  markInteresting(V.getAsRegion());
+  markInteresting(V.getAsSymbol());
+}
+
+void BugReport::markInteresting(const LocationContext *LC) {
+  if (!LC)
+    return;
+  InterestingLocationContexts.insert(LC);
+}
+
+bool BugReport::isInteresting(SVal V) {
+  return isInteresting(V.getAsRegion()) || isInteresting(V.getAsSymbol());
+}
+
+bool BugReport::isInteresting(SymbolRef sym) {
+  if (!sym)
+    return false;
+  // We don't currently consider metadata symbols to be interesting
+  // even if we know their region is interesting. Is that correct behavior?
+  return getInterestingSymbols().count(sym);
+}
+
+bool BugReport::isInteresting(const MemRegion *R) {
+  if (!R)
+    return false;
+  R = R->getBaseRegion();
+  bool b = getInterestingRegions().count(R);
+  if (b)
+    return true;
+  if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+    return getInterestingSymbols().count(SR->getSymbol());
+  return false;
+}
+
+bool BugReport::isInteresting(const LocationContext *LC) {
+  if (!LC)
+    return false;
+  return InterestingLocationContexts.count(LC);
+}
+
+void BugReport::lazyInitializeInterestingSets() {
+  if (interestingSymbols.empty()) {
+    interestingSymbols.push_back(new Symbols());
+    interestingRegions.push_back(new Regions());
+  }
+}
+
+BugReport::Symbols &BugReport::getInterestingSymbols() {
+  lazyInitializeInterestingSets();
+  return *interestingSymbols.back();
+}
+
+BugReport::Regions &BugReport::getInterestingRegions() {
+  lazyInitializeInterestingSets();
+  return *interestingRegions.back();
+}
+
+void BugReport::pushInterestingSymbolsAndRegions() {
+  interestingSymbols.push_back(new Symbols(getInterestingSymbols()));
+  interestingRegions.push_back(new Regions(getInterestingRegions()));
+}
+
+void BugReport::popInterestingSymbolsAndRegions() {
+  delete interestingSymbols.back();
+  interestingSymbols.pop_back();
+  delete interestingRegions.back();
+  interestingRegions.pop_back();
+}
+
+const Stmt *BugReport::getStmt() const {
+  if (!ErrorNode)
+    return 0;
+
+  ProgramPoint ProgP = ErrorNode->getLocation();
+  const Stmt *S = NULL;
+
+  if (Optional<BlockEntrance> BE = ProgP.getAs<BlockEntrance>()) {
+    CFGBlock &Exit = ProgP.getLocationContext()->getCFG()->getExit();
+    if (BE->getBlock() == &Exit)
+      S = GetPreviousStmt(ErrorNode);
+  }
+  if (!S)
+    S = PathDiagnosticLocation::getStmt(ErrorNode);
+
+  return S;
+}
+
+std::pair<BugReport::ranges_iterator, BugReport::ranges_iterator>
+BugReport::getRanges() {
+    // If no custom ranges, add the range of the statement corresponding to
+    // the error node.
+    if (Ranges.empty()) {
+      if (const Expr *E = dyn_cast_or_null<Expr>(getStmt()))
+        addRange(E->getSourceRange());
+      else
+        return std::make_pair(ranges_iterator(), ranges_iterator());
+    }
+
+    // User-specified absence of range info.
+    if (Ranges.size() == 1 && !Ranges.begin()->isValid())
+      return std::make_pair(ranges_iterator(), ranges_iterator());
+
+    return std::make_pair(Ranges.begin(), Ranges.end());
+}
+
+PathDiagnosticLocation BugReport::getLocation(const SourceManager &SM) const {
+  if (ErrorNode) {
+    assert(!Location.isValid() &&
+     "Either Location or ErrorNode should be specified but not both.");
+    return PathDiagnosticLocation::createEndOfPath(ErrorNode, SM);
+  } else {
+    assert(Location.isValid());
+    return Location;
+  }
+
+  return PathDiagnosticLocation();
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for BugReporter and subclasses.
+//===----------------------------------------------------------------------===//
+
+BugReportEquivClass::~BugReportEquivClass() { }
+GRBugReporter::~GRBugReporter() { }
+BugReporterData::~BugReporterData() {}
+
+ExplodedGraph &GRBugReporter::getGraph() { return Eng.getGraph(); }
+
+ProgramStateManager&
+GRBugReporter::getStateManager() { return Eng.getStateManager(); }
+
+BugReporter::~BugReporter() {
+  FlushReports();
+
+  // Free the bug reports we are tracking.
+  typedef std::vector<BugReportEquivClass *> ContTy;
+  for (ContTy::iterator I = EQClassesVector.begin(), E = EQClassesVector.end();
+       I != E; ++I) {
+    delete *I;
+  }
+}
+
+void BugReporter::FlushReports() {
+  if (BugTypes.isEmpty())
+    return;
+
+  // First flush the warnings for each BugType.  This may end up creating new
+  // warnings and new BugTypes.
+  // FIXME: Only NSErrorChecker needs BugType's FlushReports.
+  // Turn NSErrorChecker into a proper checker and remove this.
+  SmallVector<const BugType*, 16> bugTypes;
+  for (BugTypesTy::iterator I=BugTypes.begin(), E=BugTypes.end(); I!=E; ++I)
+    bugTypes.push_back(*I);
+  for (SmallVector<const BugType*, 16>::iterator
+         I = bugTypes.begin(), E = bugTypes.end(); I != E; ++I)
+    const_cast<BugType*>(*I)->FlushReports(*this);
+
+  // We need to flush reports in deterministic order to ensure the order
+  // of the reports is consistent between runs.
+  typedef std::vector<BugReportEquivClass *> ContVecTy;
+  for (ContVecTy::iterator EI=EQClassesVector.begin(), EE=EQClassesVector.end();
+       EI != EE; ++EI){
+    BugReportEquivClass& EQ = **EI;
+    FlushReport(EQ);
+  }
+
+  // BugReporter owns and deletes only BugTypes created implicitly through
+  // EmitBasicReport.
+  // FIXME: There are leaks from checkers that assume that the BugTypes they
+  // create will be destroyed by the BugReporter.
+  for (llvm::StringMap<BugType*>::iterator
+         I = StrBugTypes.begin(), E = StrBugTypes.end(); I != E; ++I)
+    delete I->second;
+
+  // Remove all references to the BugType objects.
+  BugTypes = F.getEmptySet();
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnostics generation.
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// A wrapper around a report graph, which contains only a single path, and its
+/// node maps.
+class ReportGraph {
+public:
+  InterExplodedGraphMap BackMap;
+  OwningPtr<ExplodedGraph> Graph;
+  const ExplodedNode *ErrorNode;
+  size_t Index;
+};
+
+/// A wrapper around a trimmed graph and its node maps.
+class TrimmedGraph {
+  InterExplodedGraphMap InverseMap;
+
+  typedef llvm::DenseMap<const ExplodedNode *, unsigned> PriorityMapTy;
+  PriorityMapTy PriorityMap;
+
+  typedef std::pair<const ExplodedNode *, size_t> NodeIndexPair;
+  SmallVector<NodeIndexPair, 32> ReportNodes;
+
+  OwningPtr<ExplodedGraph> G;
+
+  /// A helper class for sorting ExplodedNodes by priority.
+  template <bool Descending>
+  class PriorityCompare {
+    const PriorityMapTy &PriorityMap;
+
+  public:
+    PriorityCompare(const PriorityMapTy &M) : PriorityMap(M) {}
+
+    bool operator()(const ExplodedNode *LHS, const ExplodedNode *RHS) const {
+      PriorityMapTy::const_iterator LI = PriorityMap.find(LHS);
+      PriorityMapTy::const_iterator RI = PriorityMap.find(RHS);
+      PriorityMapTy::const_iterator E = PriorityMap.end();
+
+      if (LI == E)
+        return Descending;
+      if (RI == E)
+        return !Descending;
+
+      return Descending ? LI->second > RI->second
+                        : LI->second < RI->second;
+    }
+
+    bool operator()(const NodeIndexPair &LHS, const NodeIndexPair &RHS) const {
+      return (*this)(LHS.first, RHS.first);
+    }
+  };
+
+public:
+  TrimmedGraph(const ExplodedGraph *OriginalGraph,
+               ArrayRef<const ExplodedNode *> Nodes);
+
+  bool popNextReportGraph(ReportGraph &GraphWrapper);
+};
+}
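+
+// Usage sketch (illustrative): the path-diagnostic machinery below builds a
+// TrimmedGraph over all error nodes and then pops one single-path ReportGraph
+// per report:
+//   TrimmedGraph TrimG(&getGraph(), errorNodes);
+//   ReportGraph ErrorGraph;
+//   while (TrimG.popNextReportGraph(ErrorGraph)) { /* build one diagnostic */ }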
+
+TrimmedGraph::TrimmedGraph(const ExplodedGraph *OriginalGraph,
+                           ArrayRef<const ExplodedNode *> Nodes) {
+  // The trimmed graph is created in the body of the constructor to ensure
+  // that the DenseMaps have been initialized already.
+  InterExplodedGraphMap ForwardMap;
+  G.reset(OriginalGraph->trim(Nodes, &ForwardMap, &InverseMap));
+
+  // Find the error nodes in the trimmed graph.  We just need to consult
+  // the forward node map, which maps nodes in the original graph to nodes
+  // in the new graph.
+  llvm::SmallPtrSet<const ExplodedNode *, 32> RemainingNodes;
+
+  for (unsigned i = 0, count = Nodes.size(); i < count; ++i) {
+    if (const ExplodedNode *NewNode = ForwardMap.lookup(Nodes[i])) {
+      ReportNodes.push_back(std::make_pair(NewNode, i));
+      RemainingNodes.insert(NewNode);
+    }
+  }
+
+  assert(!RemainingNodes.empty() && "No error node found in the trimmed graph");
+
+  // Perform a forward BFS to find all the shortest paths.
+  std::queue<const ExplodedNode *> WS;
+
+  assert(G->num_roots() == 1);
+  WS.push(*G->roots_begin());
+  unsigned Priority = 0;
+
+  while (!WS.empty()) {
+    const ExplodedNode *Node = WS.front();
+    WS.pop();
+
+    PriorityMapTy::iterator PriorityEntry;
+    bool IsNew;
+    llvm::tie(PriorityEntry, IsNew) =
+      PriorityMap.insert(std::make_pair(Node, Priority));
+    ++Priority;
+
+    if (!IsNew) {
+      assert(PriorityEntry->second <= Priority);
+      continue;
+    }
+
+    if (RemainingNodes.erase(Node))
+      if (RemainingNodes.empty())
+        break;
+
+    for (ExplodedNode::const_succ_iterator I = Node->succ_begin(),
+                                           E = Node->succ_end();
+         I != E; ++I)
+      WS.push(*I);
+  }
+
+  // Sort the error paths from longest to shortest.
+  std::sort(ReportNodes.begin(), ReportNodes.end(),
+            PriorityCompare<true>(PriorityMap));
+}
+
+bool TrimmedGraph::popNextReportGraph(ReportGraph &GraphWrapper) {
+  if (ReportNodes.empty())
+    return false;
+
+  const ExplodedNode *OrigN;
+  llvm::tie(OrigN, GraphWrapper.Index) = ReportNodes.pop_back_val();
+  assert(PriorityMap.find(OrigN) != PriorityMap.end() &&
+         "error node not accessible from root");
+
+  // Create a new graph with a single path.  This is the graph
+  // that will be returned to the caller.
+  ExplodedGraph *GNew = new ExplodedGraph();
+  GraphWrapper.Graph.reset(GNew);
+  GraphWrapper.BackMap.clear();
+
+  // Now walk from the error node up the BFS path, always taking the
+  // predecessor with the lowest number.
+  ExplodedNode *Succ = 0;
+  while (true) {
+    // Create the equivalent node in the new graph with the same state
+    // and location.
+    ExplodedNode *NewN = GNew->getNode(OrigN->getLocation(), OrigN->getState(),
+                                       OrigN->isSink());
+
+    // Store the mapping to the original node.
+    InterExplodedGraphMap::const_iterator IMitr = InverseMap.find(OrigN);
+    assert(IMitr != InverseMap.end() && "No mapping to original node.");
+    GraphWrapper.BackMap[NewN] = IMitr->second;
+
+    // Link up the new node with the previous node.
+    if (Succ)
+      Succ->addPredecessor(NewN, *GNew);
+    else
+      GraphWrapper.ErrorNode = NewN;
+
+    Succ = NewN;
+
+    // Are we at the final node?
+    if (OrigN->pred_empty()) {
+      GNew->addRoot(NewN);
+      break;
+    }
+
+    // Find the next predecessor node.  We choose the node that is marked
+    // with the lowest BFS number.
+    OrigN = *std::min_element(OrigN->pred_begin(), OrigN->pred_end(),
+                          PriorityCompare<false>(PriorityMap));
+  }
+
+  return true;
+}
+
+
+/// CompactPathDiagnostic - This function postprocesses a PathDiagnostic object
+///  and collapses PathDiagnosticPieces that are expanded by macros.
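+///  For example (illustrative), several pieces whose locations all expand from
+///  the same macro invocation are grouped under a single
+///  PathDiagnosticMacroPiece.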
+static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) {
+  typedef std::vector<std::pair<IntrusiveRefCntPtr<PathDiagnosticMacroPiece>,
+                                SourceLocation> > MacroStackTy;
+
+  typedef std::vector<IntrusiveRefCntPtr<PathDiagnosticPiece> >
+          PiecesTy;
+
+  MacroStackTy MacroStack;
+  PiecesTy Pieces;
+
+  for (PathPieces::const_iterator I = path.begin(), E = path.end();
+       I!=E; ++I) {
+    
+    PathDiagnosticPiece *piece = I->getPtr();
+
+    // Recursively compact calls.
+    if (PathDiagnosticCallPiece *call=dyn_cast<PathDiagnosticCallPiece>(piece)){
+      CompactPathDiagnostic(call->path, SM);
+    }
+    
+    // Get the location of the PathDiagnosticPiece.
+    const FullSourceLoc Loc = piece->getLocation().asLocation();
+
+    // Determine the instantiation location, which is the location by which we
+    // group related PathDiagnosticPieces.
+    SourceLocation InstantiationLoc = Loc.isMacroID() ?
+                                      SM.getExpansionLoc(Loc) :
+                                      SourceLocation();
+
+    if (Loc.isFileID()) {
+      MacroStack.clear();
+      Pieces.push_back(piece);
+      continue;
+    }
+
+    assert(Loc.isMacroID());
+
+    // Is the PathDiagnosticPiece within the same macro group?
+    if (!MacroStack.empty() && InstantiationLoc == MacroStack.back().second) {
+      MacroStack.back().first->subPieces.push_back(piece);
+      continue;
+    }
+
+    // We aren't in the same group.  Are we descending into a new macro
+    // or are we part of an old one?
+    IntrusiveRefCntPtr<PathDiagnosticMacroPiece> MacroGroup;
+
+    SourceLocation ParentInstantiationLoc = InstantiationLoc.isMacroID() ?
+                                          SM.getExpansionLoc(Loc) :
+                                          SourceLocation();
+
+    // Walk the entire macro stack.
+    while (!MacroStack.empty()) {
+      if (InstantiationLoc == MacroStack.back().second) {
+        MacroGroup = MacroStack.back().first;
+        break;
+      }
+
+      if (ParentInstantiationLoc == MacroStack.back().second) {
+        MacroGroup = MacroStack.back().first;
+        break;
+      }
+
+      MacroStack.pop_back();
+    }
+
+    if (!MacroGroup || ParentInstantiationLoc == MacroStack.back().second) {
+      // Create a new macro group and add it to the stack.
+      PathDiagnosticMacroPiece *NewGroup =
+        new PathDiagnosticMacroPiece(
+          PathDiagnosticLocation::createSingleLocation(piece->getLocation()));
+
+      if (MacroGroup)
+        MacroGroup->subPieces.push_back(NewGroup);
+      else {
+        assert(InstantiationLoc.isFileID());
+        Pieces.push_back(NewGroup);
+      }
+
+      MacroGroup = NewGroup;
+      MacroStack.push_back(std::make_pair(MacroGroup, InstantiationLoc));
+    }
+
+    // Finally, add the PathDiagnosticPiece to the group.
+    MacroGroup->subPieces.push_back(piece);
+  }
+
+  // Now take the pieces and construct a new PathDiagnostic.
+  path.clear();
+
+  for (PiecesTy::iterator I=Pieces.begin(), E=Pieces.end(); I!=E; ++I)
+    path.push_back(*I);
+}
+
+bool GRBugReporter::generatePathDiagnostic(PathDiagnostic& PD,
+                                           PathDiagnosticConsumer &PC,
+                                           ArrayRef<BugReport *> &bugReports) {
+  assert(!bugReports.empty());
+
+  bool HasValid = false;
+  bool HasInvalid = false;
+  SmallVector<const ExplodedNode *, 32> errorNodes;
+  for (ArrayRef<BugReport*>::iterator I = bugReports.begin(),
+                                      E = bugReports.end(); I != E; ++I) {
+    if ((*I)->isValid()) {
+      HasValid = true;
+      errorNodes.push_back((*I)->getErrorNode());
+    } else {
+      // Keep the errorNodes list in sync with the bugReports list.
+      HasInvalid = true;
+      errorNodes.push_back(0);
+    }
+  }
+
+  // If all the reports have been marked invalid by a previous path generation,
+  // we're done.
+  if (!HasValid)
+    return false;
+
+  typedef PathDiagnosticConsumer::PathGenerationScheme PathGenerationScheme;
+  PathGenerationScheme ActiveScheme = PC.getGenerationScheme();
+
+  if (ActiveScheme == PathDiagnosticConsumer::Extensive) {
+    AnalyzerOptions &options = getEngine().getAnalysisManager().options;
+    if (options.getBooleanOption("path-diagnostics-alternate", false)) {
+      ActiveScheme = PathDiagnosticConsumer::AlternateExtensive;
+    }
+  }
+
+  TrimmedGraph TrimG(&getGraph(), errorNodes);
+  ReportGraph ErrorGraph;
+
+  while (TrimG.popNextReportGraph(ErrorGraph)) {
+    // Find the BugReport with the original location.
+    assert(ErrorGraph.Index < bugReports.size());
+    BugReport *R = bugReports[ErrorGraph.Index];
+    assert(R && "No original report found for sliced graph.");
+    assert(R->isValid() && "Report selected by trimmed graph marked invalid.");
+
+    // Start building the path diagnostic...
+    PathDiagnosticBuilder PDB(*this, R, ErrorGraph.BackMap, &PC);
+    const ExplodedNode *N = ErrorGraph.ErrorNode;
+
+    // Register additional node visitors.
+    R->addVisitor(new NilReceiverBRVisitor());
+    R->addVisitor(new ConditionBRVisitor());
+    R->addVisitor(new LikelyFalsePositiveSuppressionBRVisitor());
+
+    BugReport::VisitorList visitors;
+    unsigned origReportConfigToken, finalReportConfigToken;
+    LocationContextMap LCM;
+
+    // While generating diagnostics, it's possible the visitors will decide
+    // new symbols and regions are interesting, or add other visitors based on
+    // the information they find. If they do, we need to regenerate the path
+    // based on our new report configuration.
+    do {
+      // Get a clean copy of all the visitors.
+      for (BugReport::visitor_iterator I = R->visitor_begin(),
+                                       E = R->visitor_end(); I != E; ++I)
+        visitors.push_back((*I)->clone());
+
+      // Clear out the active path from any previous work.
+      PD.resetPath();
+      origReportConfigToken = R->getConfigurationChangeToken();
+
+      // Generate the very last diagnostic piece - the piece is visible before 
+      // the trace is expanded.
+      PathDiagnosticPiece *LastPiece = 0;
+      for (BugReport::visitor_iterator I = visitors.begin(), E = visitors.end();
+          I != E; ++I) {
+        if (PathDiagnosticPiece *Piece = (*I)->getEndPath(PDB, N, *R)) {
+          assert (!LastPiece &&
+              "There can only be one final piece in a diagnostic.");
+          LastPiece = Piece;
+        }
+      }
+
+      if (ActiveScheme != PathDiagnosticConsumer::None) {
+        if (!LastPiece)
+          LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, N, *R);
+        assert(LastPiece);
+        PD.setEndOfPath(LastPiece);
+      }
+
+      // Make sure we get a clean location context map so we don't
+      // hold onto old mappings.
+      LCM.clear();
+
+      switch (ActiveScheme) {
+      case PathDiagnosticConsumer::AlternateExtensive:
+        GenerateAlternateExtensivePathDiagnostic(PD, PDB, N, LCM, visitors);
+        break;
+      case PathDiagnosticConsumer::Extensive:
+        GenerateExtensivePathDiagnostic(PD, PDB, N, LCM, visitors);
+        break;
+      case PathDiagnosticConsumer::Minimal:
+        GenerateMinimalPathDiagnostic(PD, PDB, N, LCM, visitors);
+        break;
+      case PathDiagnosticConsumer::None:
+        GenerateVisitorsOnlyPathDiagnostic(PD, PDB, N, visitors);
+        break;
+      }
+
+      // Clean up the visitors we used.
+      llvm::DeleteContainerPointers(visitors);
+
+      // Did anything change while generating this path?
+      finalReportConfigToken = R->getConfigurationChangeToken();
+    } while (finalReportConfigToken != origReportConfigToken);
+
+    if (!R->isValid())
+      continue;
+
+    // Finally, prune the diagnostic path of uninteresting stuff.
+    if (!PD.path.empty()) {
+      // Remove messages that are basically the same.
+      removeRedundantMsgs(PD.getMutablePieces());
+
+      if (R->shouldPrunePath() &&
+          getEngine().getAnalysisManager().options.shouldPrunePaths()) {
+        bool stillHasNotes = removeUnneededCalls(PD.getMutablePieces(), R, LCM);
+        assert(stillHasNotes);
+        (void)stillHasNotes;
+      }
+
+      adjustCallLocations(PD.getMutablePieces());
+
+      if (ActiveScheme == PathDiagnosticConsumer::AlternateExtensive) {
+        ControlFlowBarrierSet CFBS;
+        OptimizedCallsSet OCS;
+        while (optimizeEdges(PD.getMutablePieces(), getSourceManager(), CFBS,
+                             OCS, LCM)) {}
+      }
+    }
+
+    // We found a report and didn't suppress it.
+    return true;
+  }
+
+  // We suppressed all the reports in this equivalence class.
+  assert(!HasInvalid && "Inconsistent suppression");
+  (void)HasInvalid;
+  return false;
+}
+
+void BugReporter::Register(BugType *BT) {
+  BugTypes = F.add(BugTypes, BT);
+}
+
+void BugReporter::emitReport(BugReport* R) {
+  // Compute the bug report's hash to determine its equivalence class.
+  llvm::FoldingSetNodeID ID;
+  R->Profile(ID);
+
+  // Look up the equivalence class.  If there isn't one, create it.
+  BugType& BT = R->getBugType();
+  Register(&BT);
+  void *InsertPos;
+  BugReportEquivClass* EQ = EQClasses.FindNodeOrInsertPos(ID, InsertPos);
+
+  if (!EQ) {
+    EQ = new BugReportEquivClass(R);
+    EQClasses.InsertNode(EQ, InsertPos);
+    EQClassesVector.push_back(EQ);
+  }
+  else
+    EQ->AddReport(R);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Emitting reports in equivalence classes.
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct FRIEC_WLItem {
+  const ExplodedNode *N;
+  ExplodedNode::const_succ_iterator I, E;
+  
+  FRIEC_WLItem(const ExplodedNode *n)
+  : N(n), I(N->succ_begin()), E(N->succ_end()) {}
+};  
+}
+
+static BugReport *
+FindReportInEquivalenceClass(BugReportEquivClass& EQ,
+                             SmallVectorImpl<BugReport*> &bugReports) {
+
+  BugReportEquivClass::iterator I = EQ.begin(), E = EQ.end();
+  assert(I != E);
+  BugType& BT = I->getBugType();
+
+  // If we don't need to suppress any of the nodes because they are
+  // post-dominated by a sink, simply add all the reports in the equivalence
+  // class to 'bugReports'.  Any of the reports will serve as a "representative"
+  // report.
+  if (!BT.isSuppressOnSink()) {
+    BugReport *R = I;
+    for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I) {
+      const ExplodedNode *N = I->getErrorNode();
+      if (N) {
+        R = I;
+        bugReports.push_back(R);
+      }
+    }
+    return R;
+  }
+
+  // For bug reports that should be suppressed when all paths are post-dominated
+  // by a sink node, iterate through the reports in the equivalence class
+  // until we find one that isn't post-dominated (if one exists).  We use a
+  // DFS traversal of the ExplodedGraph to find a non-sink node.  We could write
+  // this as a recursive function, but we don't want to risk blowing out the
+  // stack for very long paths.
+  BugReport *exampleReport = 0;
+
+  for (; I != E; ++I) {
+    const ExplodedNode *errorNode = I->getErrorNode();
+
+    if (!errorNode)
+      continue;
+    if (errorNode->isSink()) {
+      llvm_unreachable(
+           "BugType::isSuppressSink() should not be 'true' for sink end nodes");
+    }
+    // No successors?  By definition this node isn't post-dominated by a sink.
+    if (errorNode->succ_empty()) {
+      bugReports.push_back(I);
+      if (!exampleReport)
+        exampleReport = I;
+      continue;
+    }
+
+    // At this point we know that 'N' is not a sink and it has at least one
+    // successor.  Use a DFS worklist to find a non-sink end-of-path node.    
+    typedef FRIEC_WLItem WLItem;
+    typedef SmallVector<WLItem, 10> DFSWorkList;
+    llvm::DenseMap<const ExplodedNode *, unsigned> Visited;
+    
+    DFSWorkList WL;
+    WL.push_back(errorNode);
+    Visited[errorNode] = 1;
+    
+    while (!WL.empty()) {
+      WLItem &WI = WL.back();
+      assert(!WI.N->succ_empty());
+            
+      for (; WI.I != WI.E; ++WI.I) {
+        const ExplodedNode *Succ = *WI.I;        
+        // End-of-path node?
+        if (Succ->succ_empty()) {
+          // If we found an end-of-path node that is not a sink.
+          if (!Succ->isSink()) {
+            bugReports.push_back(I);
+            if (!exampleReport)
+              exampleReport = I;
+            WL.clear();
+            break;
+          }
+          // Found a sink?  Continue on to the next successor.
+          continue;
+        }
+        // Mark the successor as visited.  If it hasn't been explored,
+        // enqueue it to the DFS worklist.
+        unsigned &mark = Visited[Succ];
+        if (!mark) {
+          mark = 1;
+          WL.push_back(Succ);
+          break;
+        }
+      }
+
+      // The worklist may have been cleared at this point.  First
+      // check if it is empty before checking the last item.
+      if (!WL.empty() && &WL.back() == &WI)
+        WL.pop_back();
+    }
+  }
+
+  // exampleReport will be NULL if all the nodes in the equivalence class
+  // were post-dominated by sinks.
+  return exampleReport;
+}
+
+void BugReporter::FlushReport(BugReportEquivClass& EQ) {
+  SmallVector<BugReport*, 10> bugReports;
+  BugReport *exampleReport = FindReportInEquivalenceClass(EQ, bugReports);
+  if (exampleReport) {
+    const PathDiagnosticConsumers &C = getPathDiagnosticConsumers();
+    for (PathDiagnosticConsumers::const_iterator I=C.begin(),
+                                                 E=C.end(); I != E; ++I) {
+      FlushReport(exampleReport, **I, bugReports);
+    }
+  }
+}
+
+void BugReporter::FlushReport(BugReport *exampleReport,
+                              PathDiagnosticConsumer &PD,
+                              ArrayRef<BugReport*> bugReports) {
+
+  // FIXME: Make sure we use the 'R' for the path that was actually used.
+  // Probably doesn't make a difference in practice.
+  BugType& BT = exampleReport->getBugType();
+
+  OwningPtr<PathDiagnostic>
+    D(new PathDiagnostic(exampleReport->getDeclWithIssue(),
+                         exampleReport->getBugType().getName(),
+                         exampleReport->getDescription(),
+                         exampleReport->getShortDescription(/*Fallback=*/false),
+                         BT.getCategory(),
+                         exampleReport->getUniqueingLocation(),
+                         exampleReport->getUniqueingDecl()));
+
+  MaxBugClassSize = std::max(bugReports.size(),
+                             static_cast<size_t>(MaxBugClassSize));
+
+  // Generate the full path diagnostic, using the generation scheme
+  // specified by the PathDiagnosticConsumer. Note that we have to generate
+  // path diagnostics even for consumers which do not support paths, because
+  // the BugReporterVisitors may mark this bug as a false positive.
+  if (!bugReports.empty())
+    if (!generatePathDiagnostic(*D.get(), PD, bugReports))
+      return;
+
+  MaxValidBugClassSize = std::max(bugReports.size(),
+                                  static_cast<size_t>(MaxValidBugClassSize));
+
+  // If the path is empty, generate a single step path with the location
+  // of the issue.
+  if (D->path.empty()) {
+    PathDiagnosticLocation L = exampleReport->getLocation(getSourceManager());
+    PathDiagnosticPiece *piece =
+      new PathDiagnosticEventPiece(L, exampleReport->getDescription());
+    BugReport::ranges_iterator Beg, End;
+    llvm::tie(Beg, End) = exampleReport->getRanges();
+    for ( ; Beg != End; ++Beg)
+      piece->addRange(*Beg);
+    D->setEndOfPath(piece);
+  }
+
+  // Get the meta data.
+  const BugReport::ExtraTextList &Meta = exampleReport->getExtraText();
+  for (BugReport::ExtraTextList::const_iterator i = Meta.begin(),
+                                                e = Meta.end(); i != e; ++i) {
+    D->addMeta(*i);
+  }
+
+  PD.HandlePathDiagnostic(D.take());
+}
+
+void BugReporter::EmitBasicReport(const Decl *DeclWithIssue,
+                                  StringRef name,
+                                  StringRef category,
+                                  StringRef str, PathDiagnosticLocation Loc,
+                                  SourceRange* RBeg, unsigned NumRanges) {
+
+  // 'BT' is owned by BugReporter.
+  BugType *BT = getBugTypeForName(name, category);
+  BugReport *R = new BugReport(*BT, str, Loc);
+  R->setDeclWithIssue(DeclWithIssue);
+  for ( ; NumRanges > 0 ; --NumRanges, ++RBeg) R->addRange(*RBeg);
+  emitReport(R);
+}
+
+BugType *BugReporter::getBugTypeForName(StringRef name,
+                                        StringRef category) {
+  SmallString<136> fullDesc;
+  llvm::raw_svector_ostream(fullDesc) << name << ":" << category;
+  llvm::StringMapEntry<BugType *> &
+      entry = StrBugTypes.GetOrCreateValue(fullDesc);
+  BugType *BT = entry.getValue();
+  if (!BT) {
+    BT = new BugType(name, category);
+    entry.setValue(BT);
+  }
+  return BT;
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
new file mode 100644
index 0000000..e078745
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -0,0 +1,1600 @@
+// BugReporterVisitors.cpp - Helpers for reporting bugs -----------*- C++ -*--//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines a set of BugReporter "visitors" which can be used to
+//  enhance the diagnostics reported for a bug.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+using llvm::FoldingSetNodeID;
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+bool bugreporter::isDeclRefExprToReference(const Expr *E) {
+  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+    return DRE->getDecl()->getType()->isReferenceType();
+  }
+  return false;
+}
+
+const Expr *bugreporter::getDerefExpr(const Stmt *S) {
+  // Pattern match for a few useful cases:
+  //   a[0], p->f, *p
+  const Expr *E = dyn_cast<Expr>(S);
+  if (!E)
+    return 0;
+  E = E->IgnoreParenCasts();
+
+  while (true) {
+    if (const BinaryOperator *B = dyn_cast<BinaryOperator>(E)) {
+      assert(B->isAssignmentOp());
+      E = B->getLHS()->IgnoreParenCasts();
+      continue;
+    }
+    else if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E)) {
+      if (U->getOpcode() == UO_Deref)
+        return U->getSubExpr()->IgnoreParenCasts();
+    }
+    else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+      if (ME->isArrow() || isDeclRefExprToReference(ME->getBase())) {
+        return ME->getBase()->IgnoreParenCasts();
+      } else {
+        // If we have a member expr with a dot, the base must have been
+        // dereferenced.
+        return getDerefExpr(ME->getBase());
+      }
+    }
+    else if (const ObjCIvarRefExpr *IvarRef = dyn_cast<ObjCIvarRefExpr>(E)) {
+      return IvarRef->getBase()->IgnoreParenCasts();
+    }
+    else if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(E)) {
+      return AE->getBase();
+    }
+    else if (isDeclRefExprToReference(E)) {
+      return E;
+    }
+    break;
+  }
+
+  return NULL;
+}
+
+const Stmt *bugreporter::GetDenomExpr(const ExplodedNode *N) {
+  const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
+  if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(S))
+    return BE->getRHS();
+  return NULL;
+}
+
+const Stmt *bugreporter::GetRetValExpr(const ExplodedNode *N) {
+  const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+  if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(S))
+    return RS->getRetValue();
+  return NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// Definitions for bug reporter visitors.
+//===----------------------------------------------------------------------===//
+
+PathDiagnosticPiece*
+BugReporterVisitor::getEndPath(BugReporterContext &BRC,
+                               const ExplodedNode *EndPathNode,
+                               BugReport &BR) {
+  return 0;
+}
+
+PathDiagnosticPiece*
+BugReporterVisitor::getDefaultEndPath(BugReporterContext &BRC,
+                                      const ExplodedNode *EndPathNode,
+                                      BugReport &BR) {
+  PathDiagnosticLocation L =
+    PathDiagnosticLocation::createEndOfPath(EndPathNode,BRC.getSourceManager());
+
+  BugReport::ranges_iterator Beg, End;
+  llvm::tie(Beg, End) = BR.getRanges();
+
+  // Only add the statement itself as a range if we didn't specify any
+  // special ranges for this report.
+  PathDiagnosticPiece *P = new PathDiagnosticEventPiece(L,
+      BR.getDescription(),
+      Beg == End);
+  for (; Beg != End; ++Beg)
+    P->addRange(*Beg);
+
+  return P;
+}
+
+
+namespace {
+/// Emits an extra note at the return statement of an interesting stack frame.
+///
+/// The returned value is marked as an interesting value, and if it's null,
+/// adds a visitor to track where it became null.
+///
+/// This visitor is intended to be used when another visitor discovers that an
+/// interesting value comes from an inlined function call.
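+///
+/// For example (illustrative): if the value returned by a hypothetical
+/// 'getPtr()' call is later found to be null, a ReturnVisitor on getPtr()'s
+/// stack frame emits a note such as "Returning null pointer" at its return
+/// statement.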
+class ReturnVisitor : public BugReporterVisitorImpl<ReturnVisitor> {
+  const StackFrameContext *StackFrame;
+  enum {
+    Initial,
+    MaybeUnsuppress,
+    Satisfied
+  } Mode;
+
+  bool EnableNullFPSuppression;
+
+public:
+  ReturnVisitor(const StackFrameContext *Frame, bool Suppressed)
+    : StackFrame(Frame), Mode(Initial), EnableNullFPSuppression(Suppressed) {}
+
+  static void *getTag() {
+    static int Tag = 0;
+    return static_cast<void *>(&Tag);
+  }
+
+  virtual void Profile(llvm::FoldingSetNodeID &ID) const {
+    ID.AddPointer(ReturnVisitor::getTag());
+    ID.AddPointer(StackFrame);
+    ID.AddBoolean(EnableNullFPSuppression);
+  }
+
+  /// Adds a ReturnVisitor if the given statement represents a call that was
+  /// inlined.
+  ///
+  /// This will search back through the ExplodedGraph, starting from the given
+  /// node, looking for when the given statement was processed. If it turns out
+  /// the statement is a call that was inlined, we add the visitor to the
+  /// bug report, so it can print a note later.
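+  ///
+  /// For example (illustrative): given 'int *p = makePtr();' with a
+  /// hypothetical makePtr(), passing the 'makePtr()' call expression as \p S
+  /// attaches a ReturnVisitor for the inlined makePtr() frame, provided the
+  /// call was in fact inlined.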
+  static void addVisitorIfNecessary(const ExplodedNode *Node, const Stmt *S,
+                                    BugReport &BR,
+                                    bool InEnableNullFPSuppression) {
+    if (!CallEvent::isCallStmt(S))
+      return;
+    
+    // First, find when we processed the statement.
+    do {
+      if (Optional<CallExitEnd> CEE = Node->getLocationAs<CallExitEnd>())
+        if (CEE->getCalleeContext()->getCallSite() == S)
+          break;
+      if (Optional<StmtPoint> SP = Node->getLocationAs<StmtPoint>())
+        if (SP->getStmt() == S)
+          break;
+
+      Node = Node->getFirstPred();
+    } while (Node);
+
+    // Next, step over any post-statement checks.
+    while (Node && Node->getLocation().getAs<PostStmt>())
+      Node = Node->getFirstPred();
+    if (!Node)
+      return;
+
+    // Finally, see if we inlined the call.
+    Optional<CallExitEnd> CEE = Node->getLocationAs<CallExitEnd>();
+    if (!CEE)
+      return;
+    
+    const StackFrameContext *CalleeContext = CEE->getCalleeContext();
+    if (CalleeContext->getCallSite() != S)
+      return;
+    
+    // Check the return value.
+    ProgramStateRef State = Node->getState();
+    SVal RetVal = State->getSVal(S, Node->getLocationContext());
+
+    // Handle cases where a reference is returned and then immediately used.
+    if (cast<Expr>(S)->isGLValue())
+      if (Optional<Loc> LValue = RetVal.getAs<Loc>())
+        RetVal = State->getSVal(*LValue);
+
+    // See if the return value is NULL. If so, suppress the report.
+    SubEngine *Eng = State->getStateManager().getOwningEngine();
+    assert(Eng && "Cannot file a bug report without an owning engine");
+    AnalyzerOptions &Options = Eng->getAnalysisManager().options;
+
+    bool EnableNullFPSuppression = false;
+    if (InEnableNullFPSuppression && Options.shouldSuppressNullReturnPaths())
+      if (Optional<Loc> RetLoc = RetVal.getAs<Loc>())
+        EnableNullFPSuppression = State->isNull(*RetLoc).isConstrainedTrue();
+
+    BR.markInteresting(CalleeContext);
+    BR.addVisitor(new ReturnVisitor(CalleeContext, EnableNullFPSuppression));
+  }
+
+  /// Returns true if any counter-suppression heuristics are enabled for
+  /// ReturnVisitor.
+  static bool hasCounterSuppression(AnalyzerOptions &Options) {
+    return Options.shouldAvoidSuppressingNullArgumentPaths();
+  }
+
+  PathDiagnosticPiece *visitNodeInitial(const ExplodedNode *N,
+                                        const ExplodedNode *PrevN,
+                                        BugReporterContext &BRC,
+                                        BugReport &BR) {
+    // Only print a message at the interesting return statement.
+    if (N->getLocationContext() != StackFrame)
+      return 0;
+
+    Optional<StmtPoint> SP = N->getLocationAs<StmtPoint>();
+    if (!SP)
+      return 0;
+
+    const ReturnStmt *Ret = dyn_cast<ReturnStmt>(SP->getStmt());
+    if (!Ret)
+      return 0;
+
+    // Okay, we're at the right return statement, but do we have the return
+    // value available?
+    ProgramStateRef State = N->getState();
+    SVal V = State->getSVal(Ret, StackFrame);
+    if (V.isUnknownOrUndef())
+      return 0;
+
+    // Don't print any more notes after this one.
+    Mode = Satisfied;
+
+    const Expr *RetE = Ret->getRetValue();
+    assert(RetE && "Tracking a return value for a void function");
+
+    // Handle cases where a reference is returned and then immediately used.
+    Optional<Loc> LValue;
+    if (RetE->isGLValue()) {
+      if ((LValue = V.getAs<Loc>())) {
+        SVal RValue = State->getRawSVal(*LValue, RetE->getType());
+        if (RValue.getAs<DefinedSVal>())
+          V = RValue;
+      }
+    }
+
+    // Ignore aggregate rvalues.
+    if (V.getAs<nonloc::LazyCompoundVal>() ||
+        V.getAs<nonloc::CompoundVal>())
+      return 0;
+
+    RetE = RetE->IgnoreParenCasts();
+
+    // If we can't prove the return value is 0, just mark it interesting, and
+    // make sure to track it into any further inner functions.
+    if (!State->isNull(V).isConstrainedTrue()) {
+      BR.markInteresting(V);
+      ReturnVisitor::addVisitorIfNecessary(N, RetE, BR,
+                                           EnableNullFPSuppression);
+      return 0;
+    }
+      
+    // If we're returning 0, we should track where that 0 came from.
+    bugreporter::trackNullOrUndefValue(N, RetE, BR, /*IsArg*/ false,
+                                       EnableNullFPSuppression);
+
+    // Build an appropriate message based on the return value.
+    SmallString<64> Msg;
+    llvm::raw_svector_ostream Out(Msg);
+
+    if (V.getAs<Loc>()) {
+      // If we have counter-suppression enabled, make sure we keep visiting
+      // future nodes. We want to emit a path note as well, in case
+      // the report is resurrected as valid later on.
+      ExprEngine &Eng = BRC.getBugReporter().getEngine();
+      AnalyzerOptions &Options = Eng.getAnalysisManager().options;
+      if (EnableNullFPSuppression && hasCounterSuppression(Options))
+        Mode = MaybeUnsuppress;
+
+      if (RetE->getType()->isObjCObjectPointerType())
+        Out << "Returning nil";
+      else
+        Out << "Returning null pointer";
+    } else {
+      Out << "Returning zero";
+    }
+
+    if (LValue) {
+      if (const MemRegion *MR = LValue->getAsRegion()) {
+        if (MR->canPrintPretty()) {
+          Out << " (reference to ";
+          MR->printPretty(Out);
+          Out << ")";
+        }
+      }
+    } else {
+      // FIXME: We should have a more generalized location printing mechanism.
+      if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(RetE))
+        if (const DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(DR->getDecl()))
+          Out << " (loaded from '" << *DD << "')";
+    }
+
+    PathDiagnosticLocation L(Ret, BRC.getSourceManager(), StackFrame);
+    return new PathDiagnosticEventPiece(L, Out.str());
+  }
+
+  PathDiagnosticPiece *visitNodeMaybeUnsuppress(const ExplodedNode *N,
+                                                const ExplodedNode *PrevN,
+                                                BugReporterContext &BRC,
+                                                BugReport &BR) {
+#ifndef NDEBUG
+    ExprEngine &Eng = BRC.getBugReporter().getEngine();
+    AnalyzerOptions &Options = Eng.getAnalysisManager().options;
+    assert(hasCounterSuppression(Options));
+#endif
+
+    // Are we at the entry node for this call?
+    Optional<CallEnter> CE = N->getLocationAs<CallEnter>();
+    if (!CE)
+      return 0;
+
+    if (CE->getCalleeContext() != StackFrame)
+      return 0;
+
+    Mode = Satisfied;
+
+    // Don't automatically suppress a report if one of the arguments is
+    // known to be a null pointer. Instead, start tracking /that/ null
+    // value back to its origin.
+    ProgramStateManager &StateMgr = BRC.getStateManager();
+    CallEventManager &CallMgr = StateMgr.getCallEventManager();
+
+    ProgramStateRef State = N->getState();
+    CallEventRef<> Call = CallMgr.getCaller(StackFrame, State);
+    for (unsigned I = 0, E = Call->getNumArgs(); I != E; ++I) {
+      Optional<Loc> ArgV = Call->getArgSVal(I).getAs<Loc>();
+      if (!ArgV)
+        continue;
+
+      const Expr *ArgE = Call->getArgExpr(I);
+      if (!ArgE)
+        continue;
+
+      // Is it possible for this argument to be non-null?
+      if (!State->isNull(*ArgV).isConstrainedTrue())
+        continue;
+
+      if (bugreporter::trackNullOrUndefValue(N, ArgE, BR, /*IsArg=*/true,
+                                             EnableNullFPSuppression))
+        BR.removeInvalidation(ReturnVisitor::getTag(), StackFrame);
+
+      // If we /can't/ track the null pointer, we should err on the side of
+      // false negatives, and continue towards marking this report invalid.
+      // (We will still look at the other arguments, though.)
+    }
+
+    return 0;
+  }
+
+  PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+                                 const ExplodedNode *PrevN,
+                                 BugReporterContext &BRC,
+                                 BugReport &BR) {
+    switch (Mode) {
+    case Initial:
+      return visitNodeInitial(N, PrevN, BRC, BR);
+    case MaybeUnsuppress:
+      return visitNodeMaybeUnsuppress(N, PrevN, BRC, BR);
+    case Satisfied:
+      return 0;
+    }
+
+    llvm_unreachable("Invalid visit mode!");
+  }
+
+  PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
+                                  const ExplodedNode *N,
+                                  BugReport &BR) {
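+    // If suppression is enabled, tentatively mark the report invalid here;
+    // visitNodeMaybeUnsuppress can remove this invalidation again if it
+    // manages to track a null argument instead.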
+    if (EnableNullFPSuppression)
+      BR.markInvalid(ReturnVisitor::getTag(), StackFrame);
+    return 0;
+  }
+};
+} // end anonymous namespace
+
+
+void FindLastStoreBRVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
+  static int tag = 0;
+  ID.AddPointer(&tag);
+  ID.AddPointer(R);
+  ID.Add(V);
+  ID.AddBoolean(EnableNullFPSuppression);
+}
+
+/// Returns true if \p N represents the DeclStmt declaring and initializing
+/// \p VR.
+static bool isInitializationOfVar(const ExplodedNode *N, const VarRegion *VR) {
+  Optional<PostStmt> P = N->getLocationAs<PostStmt>();
+  if (!P)
+    return false;
+
+  const DeclStmt *DS = P->getStmtAs<DeclStmt>();
+  if (!DS)
+    return false;
+
+  if (DS->getSingleDecl() != VR->getDecl())
+    return false;
+
+  const MemSpaceRegion *VarSpace = VR->getMemorySpace();
+  const StackSpaceRegion *FrameSpace = dyn_cast<StackSpaceRegion>(VarSpace);
+  if (!FrameSpace) {
+    // If we ever directly evaluate global DeclStmts, this assertion will be
+    // invalid, but this still seems preferable to silently accepting an
+    // initialization that may be for a path-sensitive variable.
+    assert(VR->getDecl()->isStaticLocal() && "non-static stackless VarRegion");
+    return true;
+  }
+
+  assert(VR->getDecl()->hasLocalStorage());
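+  // Only accept the DeclStmt if it was evaluated in the same stack frame as
+  // the variable's region; a recursive call would have its own frame.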
+  const LocationContext *LCtx = N->getLocationContext();
+  return FrameSpace->getStackFrame() == LCtx->getCurrentStackFrame();
+}
+
+PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
+                                                       const ExplodedNode *Pred,
+                                                       BugReporterContext &BRC,
+                                                       BugReport &BR) {
+
+  if (Satisfied)
+    return NULL;
+
+  const ExplodedNode *StoreSite = 0;
+  const Expr *InitE = 0;
+  bool IsParam = false;
+
+  // First see if we reached the declaration of the region.
+  if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+    if (isInitializationOfVar(Pred, VR)) {
+      StoreSite = Pred;
+      InitE = VR->getDecl()->getInit();
+    }
+  }
+
+  // If this is a post initializer expression, initializing the region, we
+  // should track the initializer expression.
+  if (Optional<PostInitializer> PIP = Pred->getLocationAs<PostInitializer>()) {
+    const MemRegion *FieldReg = (const MemRegion *)PIP->getLocationValue();
+    if (FieldReg && FieldReg == R) {
+      StoreSite = Pred;
+      InitE = PIP->getInitializer()->getInit();
+    }
+  }
+  
+  // Otherwise, see if this is the store site:
+  // (1) Succ has this binding and Pred does not, i.e. this is
+  //     where the binding first occurred.
+  // (2) Succ has this binding and is a PostStore node for this region, i.e.
+  //     the same binding was re-assigned here.
+  if (!StoreSite) {
+    if (Succ->getState()->getSVal(R) != V)
+      return NULL;
+
+    if (Pred->getState()->getSVal(R) == V) {
+      Optional<PostStore> PS = Succ->getLocationAs<PostStore>();
+      if (!PS || PS->getLocationValue() != R)
+        return NULL;
+    }
+
+    StoreSite = Succ;
+
+    // If this is an assignment expression, we can track the value
+    // being assigned.
+    if (Optional<PostStmt> P = Succ->getLocationAs<PostStmt>())
+      if (const BinaryOperator *BO = P->getStmtAs<BinaryOperator>())
+        if (BO->isAssignmentOp())
+          InitE = BO->getRHS();
+
+    // If this is a call entry, the variable should be a parameter.
+    // FIXME: Handle CXXThisRegion as well. (This is not a priority because
+    // 'this' should never be NULL, but this visitor isn't just for NULL and
+    // UndefinedVal.)
+    if (Optional<CallEnter> CE = Succ->getLocationAs<CallEnter>()) {
+      if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+        const ParmVarDecl *Param = cast<ParmVarDecl>(VR->getDecl());
+        
+        ProgramStateManager &StateMgr = BRC.getStateManager();
+        CallEventManager &CallMgr = StateMgr.getCallEventManager();
+
+        CallEventRef<> Call = CallMgr.getCaller(CE->getCalleeContext(),
+                                                Succ->getState());
+        InitE = Call->getArgExpr(Param->getFunctionScopeIndex());
+        IsParam = true;
+      }
+    }
+
+    // If this is a CXXTempObjectRegion, the Expr responsible for its creation
+    // is wrapped inside of it.
+    if (const CXXTempObjectRegion *TmpR = dyn_cast<CXXTempObjectRegion>(R))
+      InitE = TmpR->getExpr();
+  }
+
+  if (!StoreSite)
+    return NULL;
+  Satisfied = true;
+
+  // If we have an expression that provided the value, try to track where it
+  // came from.
+  if (InitE) {
+    if (V.isUndef() || V.getAs<loc::ConcreteInt>()) {
+      if (!IsParam)
+        InitE = InitE->IgnoreParenCasts();
+      bugreporter::trackNullOrUndefValue(StoreSite, InitE, BR, IsParam,
+                                         EnableNullFPSuppression);
+    } else {
+      ReturnVisitor::addVisitorIfNecessary(StoreSite, InitE->IgnoreParenCasts(),
+                                           BR, EnableNullFPSuppression);
+    }
+  }
+
+  // Okay, we've found the binding. Emit an appropriate message.
+  SmallString<256> sbuf;
+  llvm::raw_svector_ostream os(sbuf);
+
+  if (Optional<PostStmt> PS = StoreSite->getLocationAs<PostStmt>()) {
+    const Stmt *S = PS->getStmt();
+    const char *action = 0;
+    const DeclStmt *DS = dyn_cast<DeclStmt>(S);
+    const VarRegion *VR = dyn_cast<VarRegion>(R);
+
+    if (DS) {
+      action = R->canPrintPretty() ? "initialized to " :
+                                     "Initializing to ";
+    } else if (isa<BlockExpr>(S)) {
+      action = R->canPrintPretty() ? "captured by block as " :
+                                     "Captured by block as ";
+      if (VR) {
+        // See if we can get the BlockVarRegion.
+        ProgramStateRef State = StoreSite->getState();
+        SVal V = State->getSVal(S, PS->getLocationContext());
+        if (const BlockDataRegion *BDR =
+              dyn_cast_or_null<BlockDataRegion>(V.getAsRegion())) {
+          if (const VarRegion *OriginalR = BDR->getOriginalRegion(VR)) {
+            if (Optional<KnownSVal> KV =
+                State->getSVal(OriginalR).getAs<KnownSVal>())
+              BR.addVisitor(new FindLastStoreBRVisitor(*KV, OriginalR,
+                                                      EnableNullFPSuppression));
+          }
+        }
+      }
+    }
+
+    if (action) {
+      if (R->canPrintPretty()) {
+        R->printPretty(os);
+        os << " ";
+      }
+
+      if (V.getAs<loc::ConcreteInt>()) {
+        bool b = false;
+        if (R->isBoundable()) {
+          if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
+            if (TR->getValueType()->isObjCObjectPointerType()) {
+              os << action << "nil";
+              b = true;
+            }
+          }
+        }
+
+        if (!b)
+          os << action << "a null pointer value";
+      } else if (Optional<nonloc::ConcreteInt> CVal =
+                     V.getAs<nonloc::ConcreteInt>()) {
+        os << action << CVal->getValue();
+      }
+      else if (DS) {
+        if (V.isUndef()) {
+          if (isa<VarRegion>(R)) {
+            const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+            if (VD->getInit()) {
+              os << (R->canPrintPretty() ? "initialized" : "Initializing")
+                 << " to a garbage value";
+            } else {
+              os << (R->canPrintPretty() ? "declared" : "Declaring")
+                 << " without an initial value";
+            }
+          }
+        }
+        else {
+          os << (R->canPrintPretty() ? "initialized" : "Initialized")
+             << " here";
+        }
+      }
+    }
+  } else if (StoreSite->getLocation().getAs<CallEnter>()) {
+    if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+      const ParmVarDecl *Param = cast<ParmVarDecl>(VR->getDecl());
+
+      os << "Passing ";
+
+      if (V.getAs<loc::ConcreteInt>()) {
+        if (Param->getType()->isObjCObjectPointerType())
+          os << "nil object reference";
+        else
+          os << "null pointer value";
+      } else if (V.isUndef()) {
+        os << "uninitialized value";
+      } else if (Optional<nonloc::ConcreteInt> CI =
+                     V.getAs<nonloc::ConcreteInt>()) {
+        os << "the value " << CI->getValue();
+      } else {
+        os << "value";
+      }
+
+      // Printed parameter indexes are 1-based, not 0-based.
+      unsigned Idx = Param->getFunctionScopeIndex() + 1;
+      os << " via " << Idx << llvm::getOrdinalSuffix(Idx) << " parameter";
+      if (R->canPrintPretty()) {
+        os << " ";
+        R->printPretty(os);
+      }
+    }
+  }
+
+  if (os.str().empty()) {
+    if (V.getAs<loc::ConcreteInt>()) {
+      bool b = false;
+      if (R->isBoundable()) {
+        if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
+          if (TR->getValueType()->isObjCObjectPointerType()) {
+            os << "nil object reference stored";
+            b = true;
+          }
+        }
+      }
+      if (!b) {
+        if (R->canPrintPretty())
+          os << "Null pointer value stored";
+        else
+          os << "Storing null pointer value";
+      }
+
+    } else if (V.isUndef()) {
+      if (R->canPrintPretty())
+        os << "Uninitialized value stored";
+      else
+        os << "Storing uninitialized value";
+
+    } else if (Optional<nonloc::ConcreteInt> CV =
+                   V.getAs<nonloc::ConcreteInt>()) {
+      if (R->canPrintPretty())
+        os << "The value " << CV->getValue() << " is assigned";
+      else
+        os << "Assigning " << CV->getValue();
+
+    } else {
+      if (R->canPrintPretty())
+        os << "Value assigned";
+      else
+        os << "Assigning value";
+    }
+    
+    if (R->canPrintPretty()) {
+      os << " to ";
+      R->printPretty(os);
+    }
+  }
+
+  // Construct a new PathDiagnosticPiece.
+  ProgramPoint P = StoreSite->getLocation();
+  PathDiagnosticLocation L;
+  if (P.getAs<CallEnter>() && InitE)
+    L = PathDiagnosticLocation(InitE, BRC.getSourceManager(),
+                               P.getLocationContext());
+  else
+    L = PathDiagnosticLocation::create(P, BRC.getSourceManager());
+  if (!L.isValid())
+    return NULL;
+  return new PathDiagnosticEventPiece(L, os.str());
+}
+
+void TrackConstraintBRVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
+  static int tag = 0;
+  ID.AddPointer(&tag);
+  ID.AddBoolean(Assumption);
+  ID.Add(Constraint);
+}
+
+/// Return the tag associated with this visitor.  This tag will be used
+/// to mark all PathDiagnosticPieces created by this visitor.
+const char *TrackConstraintBRVisitor::getTag() {
+  return "TrackConstraintBRVisitor";
+}
+
+bool TrackConstraintBRVisitor::isUnderconstrained(const ExplodedNode *N) const {
+  if (IsZeroCheck)
+    return N->getState()->isNull(Constraint).isUnderconstrained();
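+  // Otherwise, the constraint is underconstrained if assuming its negation
+  // still yields a feasible state.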
+  return N->getState()->assume(Constraint, !Assumption);
+}
+
+PathDiagnosticPiece *
+TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N,
+                                    const ExplodedNode *PrevN,
+                                    BugReporterContext &BRC,
+                                    BugReport &BR) {
+  if (IsSatisfied)
+    return NULL;
+
+  // Start tracking after we see the first state in which the value is
+  // constrained.
+  if (!IsTrackingTurnedOn)
+    if (!isUnderconstrained(N))
+      IsTrackingTurnedOn = true;
+  if (!IsTrackingTurnedOn)
+    return 0;
+
+  // Check if in the previous state it was feasible for this constraint
+  // to *not* be true.
+  if (isUnderconstrained(PrevN)) {
+
+    IsSatisfied = true;
+
+    // As a sanity check, make sure that the negation of the constraint
+    // was infeasible in the current state.  If it is feasible, we somehow
+    // missed the transition point.
+    assert(!isUnderconstrained(N));
+
+    // We found the transition point for the constraint.  We now need to
+    // pretty-print the constraint. (work-in-progress)
+    SmallString<64> sbuf;
+    llvm::raw_svector_ostream os(sbuf);
+
+    if (Constraint.getAs<Loc>()) {
+      os << "Assuming pointer value is ";
+      os << (Assumption ? "non-null" : "null");
+    }
+
+    if (os.str().empty())
+      return NULL;
+
+    // Construct a new PathDiagnosticPiece.
+    ProgramPoint P = N->getLocation();
+    PathDiagnosticLocation L =
+      PathDiagnosticLocation::create(P, BRC.getSourceManager());
+    if (!L.isValid())
+      return NULL;
+    
+    PathDiagnosticEventPiece *X = new PathDiagnosticEventPiece(L, os.str());
+    X->setTag(getTag());
+    return X;
+  }
+
+  return NULL;
+}
+
+SuppressInlineDefensiveChecksVisitor::
+SuppressInlineDefensiveChecksVisitor(DefinedSVal Value, const ExplodedNode *N)
+  : V(Value), IsSatisfied(false), IsTrackingTurnedOn(false) {
+
+    // Check if the visitor is disabled.
+    SubEngine *Eng = N->getState()->getStateManager().getOwningEngine();
+    assert(Eng && "Cannot file a bug report without an owning engine");
+    AnalyzerOptions &Options = Eng->getAnalysisManager().options;
+    if (!Options.shouldSuppressInlinedDefensiveChecks())
+      IsSatisfied = true;
+
+    assert(N->getState()->isNull(V).isConstrainedTrue() &&
+           "The visitor only tracks the cases where V is constrained to 0");
+}
+
+void SuppressInlineDefensiveChecksVisitor::Profile(FoldingSetNodeID &ID) const {
+  static int id = 0;
+  ID.AddPointer(&id);
+  ID.Add(V);
+}
+
+const char *SuppressInlineDefensiveChecksVisitor::getTag() {
+  return "IDCVisitor";
+}
+
+PathDiagnosticPiece *
+SuppressInlineDefensiveChecksVisitor::VisitNode(const ExplodedNode *Succ,
+                                                const ExplodedNode *Pred,
+                                                BugReporterContext &BRC,
+                                                BugReport &BR) {
+  if (IsSatisfied)
+    return 0;
+
+  // Start tracking after we see the first state in which the value is null.
+  if (!IsTrackingTurnedOn)
+    if (Succ->getState()->isNull(V).isConstrainedTrue())
+      IsTrackingTurnedOn = true;
+  if (!IsTrackingTurnedOn)
+    return 0;
+
+  // Check if in the previous state it was feasible for this value
+  // to *not* be null.
+  if (!Pred->getState()->isNull(V).isConstrainedTrue()) {
+    IsSatisfied = true;
+
+    assert(Succ->getState()->isNull(V).isConstrainedTrue());
+
+    // Check if this is an inlined defensive check.
+    const LocationContext *CurLC = Succ->getLocationContext();
+    const LocationContext *ReportLC = BR.getErrorNode()->getLocationContext();
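+    // Only suppress if the assumption was made in an inlined function that
+    // the report location is not part of.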
+    if (CurLC != ReportLC && !CurLC->isParentOf(ReportLC))
+      BR.markInvalid("Suppress IDC", CurLC);
+  }
+  return 0;
+}
+
+static const MemRegion *getLocationRegionIfReference(const Expr *E,
+                                                     const ExplodedNode *N) {
+  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E)) {
+    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+      if (!VD->getType()->isReferenceType())
+        return 0;
+      ProgramStateManager &StateMgr = N->getState()->getStateManager();
+      MemRegionManager &MRMgr = StateMgr.getRegionManager();
+      return MRMgr.getVarRegion(VD, N->getLocationContext());
+    }
+  }
+
+  // FIXME: This does not handle other kinds of null references,
+  // for example, references from FieldRegions:
+  //   struct Wrapper { int &ref; };
+  //   Wrapper w = { *(int *)0 };
+  //   w.ref = 1;
+
+  return 0;
+}
+
+static const Expr *peelOffOuterExpr(const Expr *Ex,
+                                    const ExplodedNode *N) {
+  Ex = Ex->IgnoreParenCasts();
+  if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Ex))
+    return peelOffOuterExpr(EWC->getSubExpr(), N);
+  if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Ex))
+    return peelOffOuterExpr(OVE->getSourceExpr(), N);
+
+  // Peel off the ternary operator.
+  if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(Ex)) {
+    // Find a node where the branching occurred and find out which branch
+    // we took (true/false) by looking at the ExplodedGraph.
+    const ExplodedNode *NI = N;
+    do {
+      ProgramPoint ProgPoint = NI->getLocation();
+      if (Optional<BlockEdge> BE = ProgPoint.getAs<BlockEdge>()) {
+        const CFGBlock *srcBlk = BE->getSrc();
+        if (const Stmt *term = srcBlk->getTerminator()) {
+          if (term == CO) {
+            bool TookTrueBranch = (*(srcBlk->succ_begin()) == BE->getDst());
+            if (TookTrueBranch)
+              return peelOffOuterExpr(CO->getTrueExpr(), N);
+            else
+              return peelOffOuterExpr(CO->getFalseExpr(), N);
+          }
+        }
+      }
+      NI = NI->getFirstPred();
+    } while (NI);
+  }
+  return Ex;
+}
+
+bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
+                                        const Stmt *S,
+                                        BugReport &report, bool IsArg,
+                                        bool EnableNullFPSuppression) {
+  if (!S || !N)
+    return false;
+
+  if (const Expr *Ex = dyn_cast<Expr>(S)) {
+    Ex = Ex->IgnoreParenCasts();
+    const Expr *PeeledEx = peelOffOuterExpr(Ex, N);
+    if (Ex != PeeledEx)
+      S = PeeledEx;
+  }
+
+  const Expr *Inner = 0;
+  if (const Expr *Ex = dyn_cast<Expr>(S)) {
+    Ex = Ex->IgnoreParenCasts();
+    if (ExplodedGraph::isInterestingLValueExpr(Ex) || CallEvent::isCallStmt(Ex))
+      Inner = Ex;
+  }
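+  // At this point, 'Inner' is the sub-expression whose value we can track
+  // directly (an interesting lvalue expression or a call), if there is one.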
+
+  if (IsArg && !Inner) {
+    assert(N->getLocation().getAs<CallEnter>() && "Tracking arg but not at call");
+  } else {
+    // Walk through nodes until we get one that matches the statement exactly.
+    // Alternately, if we hit a known lvalue for the statement, we know we've
+    // gone too far (though we can likely track the lvalue better anyway).
+    do {
+      const ProgramPoint &pp = N->getLocation();
+      if (Optional<StmtPoint> ps = pp.getAs<StmtPoint>()) {
+        if (ps->getStmt() == S || ps->getStmt() == Inner)
+          break;
+      } else if (Optional<CallExitEnd> CEE = pp.getAs<CallExitEnd>()) {
+        if (CEE->getCalleeContext()->getCallSite() == S ||
+            CEE->getCalleeContext()->getCallSite() == Inner)
+          break;
+      }
+      N = N->getFirstPred();
+    } while (N);
+
+    if (!N)
+      return false;
+  }
+  
+  ProgramStateRef state = N->getState();
+
+  // The result of the message send could be nil because the receiver is nil.
+  // At this point in the path, the receiver should be live since we are at the
+  // message send expr. If it is nil, start tracking it.
+  if (const Expr *Receiver = NilReceiverBRVisitor::getNilReceiver(S, N))
+    trackNullOrUndefValue(N, Receiver, report, false, EnableNullFPSuppression);
+
+
+  // See if the expression we're interested in refers to a variable.
+  // If so, we can track both its contents and constraints on its value.
+  if (Inner && ExplodedGraph::isInterestingLValueExpr(Inner)) {
+    const MemRegion *R = 0;
+
+    // Find the ExplodedNode where the lvalue (the value of 'Ex')
+    // was computed.  We need this for getting the location value.
+    const ExplodedNode *LVNode = N;
+    while (LVNode) {
+      if (Optional<PostStmt> P = LVNode->getLocation().getAs<PostStmt>()) {
+        if (P->getStmt() == Inner)
+          break;
+      }
+      LVNode = LVNode->getFirstPred();
+    }
+    assert(LVNode && "Unable to find the lvalue node.");
+    ProgramStateRef LVState = LVNode->getState();
+    SVal LVal = LVState->getSVal(Inner, LVNode->getLocationContext());
+    
+    if (LVState->isNull(LVal).isConstrainedTrue()) {
+      // In case of C++ references, we want to differentiate between a null
+      // reference and a reference to a null pointer.
+      // If the LVal is null, check whether we are dealing with a null
+      // reference; for those, we want to track the location of the reference.
+      if (const MemRegion *RR = getLocationRegionIfReference(Inner, N))
+        R = RR;
+    } else {
+      R = LVState->getSVal(Inner, LVNode->getLocationContext()).getAsRegion();
+
+      // If this is a C++ reference to a null pointer, we are tracking the
+      // pointer. In addition, we should find the store at which the reference
+      // got initialized.
+      if (const MemRegion *RR = getLocationRegionIfReference(Inner, N)) {
+        if (Optional<KnownSVal> KV = LVal.getAs<KnownSVal>())
+          report.addVisitor(new FindLastStoreBRVisitor(*KV, RR,
+                                                      EnableNullFPSuppression));
+      }
+    }
+
+    if (R) {
+      // Mark both the variable region and its contents as interesting.
+      SVal V = LVState->getRawSVal(loc::MemRegionVal(R));
+
+      report.markInteresting(R);
+      report.markInteresting(V);
+      report.addVisitor(new UndefOrNullArgVisitor(R));
+
+      // If the contents are symbolic, find out when they became null.
+      if (V.getAsLocSymbol(/*IncludeBaseRegions*/ true)) {
+        BugReporterVisitor *ConstraintTracker =
+          new TrackConstraintBRVisitor(V.castAs<DefinedSVal>(), false);
+        report.addVisitor(ConstraintTracker);
+
+        // Add visitor, which will suppress inline defensive checks.
+        if (LVState->isNull(V).isConstrainedTrue() &&
+            EnableNullFPSuppression) {
+          BugReporterVisitor *IDCSuppressor =
+            new SuppressInlineDefensiveChecksVisitor(V.castAs<DefinedSVal>(),
+                                                     LVNode);
+          report.addVisitor(IDCSuppressor);
+        }
+      }
+
+      if (Optional<KnownSVal> KV = V.getAs<KnownSVal>())
+        report.addVisitor(new FindLastStoreBRVisitor(*KV, R,
+                                                     EnableNullFPSuppression));
+      return true;
+    }
+  }
+
+  // If the expression is not an "lvalue expression", we can still
+  // track the constraints on its contents.
+  SVal V = state->getSValAsScalarOrLoc(S, N->getLocationContext());
+
+  // If the value came from an inlined function call, we should at least make
+  // sure that function isn't pruned in our output.
+  if (const Expr *E = dyn_cast<Expr>(S))
+    S = E->IgnoreParenCasts();
+
+  ReturnVisitor::addVisitorIfNecessary(N, S, report, EnableNullFPSuppression);
+
+  // Uncomment this to find cases where we aren't properly getting the
+  // base value that was dereferenced.
+  // assert(!V.isUnknownOrUndef());
+  // Is it a symbolic value?
+  if (Optional<loc::MemRegionVal> L = V.getAs<loc::MemRegionVal>()) {
+    // At this point we are dealing with the region's LValue.
+    // However, if the rvalue is a symbolic region, we should track it as well.
+    // Try to use the correct type when looking up the value.
+    SVal RVal;
+    if (const Expr *E = dyn_cast<Expr>(S))
+      RVal = state->getRawSVal(L.getValue(), E->getType());
+    else
+      RVal = state->getSVal(L->getRegion());
+
+    const MemRegion *RegionRVal = RVal.getAsRegion();
+    report.addVisitor(new UndefOrNullArgVisitor(L->getRegion()));
+
+    if (RegionRVal && isa<SymbolicRegion>(RegionRVal)) {
+      report.markInteresting(RegionRVal);
+      report.addVisitor(new TrackConstraintBRVisitor(
+        loc::MemRegionVal(RegionRVal), false));
+    }
+  }
+
+  return true;
+}
+
+const Expr *NilReceiverBRVisitor::getNilReceiver(const Stmt *S,
+                                                 const ExplodedNode *N) {
+  const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S);
+  if (!ME)
+    return 0;
+  if (const Expr *Receiver = ME->getInstanceReceiver()) {
+    ProgramStateRef state = N->getState();
+    SVal V = state->getSVal(Receiver, N->getLocationContext());
+    if (state->isNull(V).isConstrainedTrue())
+      return Receiver;
+  }
+  return 0;
+}
+
+PathDiagnosticPiece *NilReceiverBRVisitor::VisitNode(const ExplodedNode *N,
+                                                     const ExplodedNode *PrevN,
+                                                     BugReporterContext &BRC,
+                                                     BugReport &BR) {
+  Optional<PreStmt> P = N->getLocationAs<PreStmt>();
+  if (!P)
+    return 0;
+
+  const Stmt *S = P->getStmt();
+  const Expr *Receiver = getNilReceiver(S, N);
+  if (!Receiver)
+    return 0;
+
+  llvm::SmallString<256> Buf;
+  llvm::raw_svector_ostream OS(Buf);
+
+  if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
+    OS << "'" << ME->getSelector().getAsString() << "' not called";
+  }
+  else {
+    OS << "No method is called";
+  }
+  OS << " because the receiver is nil";
+
+  // The receiver was nil, and hence the method was skipped.
+  // Register a BugReporterVisitor to issue a message telling us how
+  // the receiver was null.
+  bugreporter::trackNullOrUndefValue(N, Receiver, BR, /*IsArg*/ false,
+                                     /*EnableNullFPSuppression*/ false);
+  // Issue a message saying that the method was skipped.
+  PathDiagnosticLocation L(Receiver, BRC.getSourceManager(),
+                                     N->getLocationContext());
+  return new PathDiagnosticEventPiece(L, OS.str());
+}
+
+// Registers every VarDecl inside a Stmt with a last store visitor.
+void FindLastStoreBRVisitor::registerStatementVarDecls(BugReport &BR,
+                                                const Stmt *S,
+                                                bool EnableNullFPSuppression) {
+  const ExplodedNode *N = BR.getErrorNode();
+  std::deque<const Stmt *> WorkList;
+  WorkList.push_back(S);
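+  // Worklist-based walk over the statement and all of its children, looking
+  // for DeclRefExprs to register with a FindLastStoreBRVisitor.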
+
+  while (!WorkList.empty()) {
+    const Stmt *Head = WorkList.front();
+    WorkList.pop_front();
+
+    ProgramStateRef state = N->getState();
+    ProgramStateManager &StateMgr = state->getStateManager();
+
+    if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Head)) {
+      if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+        const VarRegion *R =
+        StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
+
+        // What did we load?
+        SVal V = state->getSVal(S, N->getLocationContext());
+
+        if (V.getAs<loc::ConcreteInt>() || V.getAs<nonloc::ConcreteInt>()) {
+          // Register a new visitor with the BugReport.
+          BR.addVisitor(new FindLastStoreBRVisitor(V.castAs<KnownSVal>(), R,
+                                                   EnableNullFPSuppression));
+        }
+      }
+    }
+
+    for (Stmt::const_child_iterator I = Head->child_begin();
+        I != Head->child_end(); ++I)
+      WorkList.push_back(*I);
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Visitor that tries to report interesting diagnostics from conditions.
+//===----------------------------------------------------------------------===//
+
+/// Return the tag associated with this visitor.  This tag will be used
+/// to mark all PathDiagnosticPieces created by this visitor.
+const char *ConditionBRVisitor::getTag() {
+  return "ConditionBRVisitor";
+}
+
+PathDiagnosticPiece *ConditionBRVisitor::VisitNode(const ExplodedNode *N,
+                                                   const ExplodedNode *Prev,
+                                                   BugReporterContext &BRC,
+                                                   BugReport &BR) {
+  PathDiagnosticPiece *piece = VisitNodeImpl(N, Prev, BRC, BR);
+  if (piece) {
+    piece->setTag(getTag());
+    if (PathDiagnosticEventPiece *ev=dyn_cast<PathDiagnosticEventPiece>(piece))
+      ev->setPrunable(true, /* override */ false);
+  }
+  return piece;
+}
+
+PathDiagnosticPiece *ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N,
+                                                       const ExplodedNode *Prev,
+                                                       BugReporterContext &BRC,
+                                                       BugReport &BR) {
+  
+  ProgramPoint progPoint = N->getLocation();
+  ProgramStateRef CurrentState = N->getState();
+  ProgramStateRef PrevState = Prev->getState();
+  
+  // Compare the GDMs of the states, because that is where constraints
+  // are managed.  Note that this ensures we only look at nodes that
+  // were generated by the analyzer engine proper, not checkers.
+  if (CurrentState->getGDM().getRoot() ==
+      PrevState->getGDM().getRoot())
+    return 0;
+  
+  // If an assumption was made on a branch, it should be caught
+  // here by looking at the state transition.
+  if (Optional<BlockEdge> BE = progPoint.getAs<BlockEdge>()) {
+    const CFGBlock *srcBlk = BE->getSrc();    
+    if (const Stmt *term = srcBlk->getTerminator())
+      return VisitTerminator(term, N, srcBlk, BE->getDst(), BR, BRC);
+    return 0;
+  }
+  
+  if (Optional<PostStmt> PS = progPoint.getAs<PostStmt>()) {
+    // FIXME: Assuming that BugReporter is a GRBugReporter is a layering
+    // violation.
+    const std::pair<const ProgramPointTag *, const ProgramPointTag *> &tags =      
+      cast<GRBugReporter>(BRC.getBugReporter()).
+        getEngine().geteagerlyAssumeBinOpBifurcationTags();
+
+    const ProgramPointTag *tag = PS->getTag();
+    if (tag == tags.first)
+      return VisitTrueTest(cast<Expr>(PS->getStmt()), true,
+                           BRC, BR, N);
+    if (tag == tags.second)
+      return VisitTrueTest(cast<Expr>(PS->getStmt()), false,
+                           BRC, BR, N);
+                           
+    return 0;
+  }
+    
+  return 0;
+}
+
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitTerminator(const Stmt *Term,
+                                    const ExplodedNode *N,
+                                    const CFGBlock *srcBlk,
+                                    const CFGBlock *dstBlk,
+                                    BugReport &R,
+                                    BugReporterContext &BRC) {
+  const Expr *Cond = 0;
+  
+  switch (Term->getStmtClass()) {
+  default:
+    return 0;
+  case Stmt::IfStmtClass:
+    Cond = cast<IfStmt>(Term)->getCond();
+    break;
+  case Stmt::ConditionalOperatorClass:
+    Cond = cast<ConditionalOperator>(Term)->getCond();
+    break;
+  }      
+
+  assert(Cond);
+  assert(srcBlk->succ_size() == 2);
+  const bool tookTrue = *(srcBlk->succ_begin()) == dstBlk;
+  return VisitTrueTest(Cond, tookTrue, BRC, R, N);
+}
+
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
+                                  bool tookTrue,
+                                  BugReporterContext &BRC,
+                                  BugReport &R,
+                                  const ExplodedNode *N) {
+  
+  const Expr *Ex = Cond;
+  
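+  // Strip parens and casts; for logical negation, flip the branch we believe
+  // was taken and keep looking at the sub-expression.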
+  while (true) {
+    Ex = Ex->IgnoreParenCasts();
+    switch (Ex->getStmtClass()) {
+      default:
+        return 0;
+      case Stmt::BinaryOperatorClass:
+        return VisitTrueTest(Cond, cast<BinaryOperator>(Ex), tookTrue, BRC,
+                             R, N);
+      case Stmt::DeclRefExprClass:
+        return VisitTrueTest(Cond, cast<DeclRefExpr>(Ex), tookTrue, BRC,
+                             R, N);
+      case Stmt::UnaryOperatorClass: {
+        const UnaryOperator *UO = cast<UnaryOperator>(Ex);
+        if (UO->getOpcode() == UO_LNot) {
+          tookTrue = !tookTrue;
+          Ex = UO->getSubExpr();
+          continue;
+        }
+        return 0;
+      }
+    }
+  }
+}
+
+bool ConditionBRVisitor::patternMatch(const Expr *Ex, raw_ostream &Out,
+                                      BugReporterContext &BRC,
+                                      BugReport &report,
+                                      const ExplodedNode *N,
+                                      Optional<bool> &prunable) {
+  const Expr *OriginalExpr = Ex;
+  Ex = Ex->IgnoreParenCasts();
+
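+  // Variable references are printed quoted. If the variable or its current
+  // value is interesting to the report, keep the resulting piece from being
+  // pruned.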
+  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex)) {
+    const bool quotes = isa<VarDecl>(DR->getDecl());
+    if (quotes) {
+      Out << '\'';
+      const LocationContext *LCtx = N->getLocationContext();
+      const ProgramState *state = N->getState().getPtr();
+      if (const MemRegion *R = state->getLValue(cast<VarDecl>(DR->getDecl()),
+                                                LCtx).getAsRegion()) {
+        if (report.isInteresting(R))
+          prunable = false;
+        else {
+          const ProgramState *state = N->getState().getPtr();
+          SVal V = state->getSVal(R);
+          if (report.isInteresting(V))
+            prunable = false;
+        }
+      }
+    }
+    Out << DR->getDecl()->getDeclName().getAsString();
+    if (quotes)
+      Out << '\'';
+    return quotes;
+  }
+  
+  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(Ex)) {
+    QualType OriginalTy = OriginalExpr->getType();
+    if (OriginalTy->isPointerType()) {
+      if (IL->getValue() == 0) {
+        Out << "null";
+        return false;
+      }
+    }
+    else if (OriginalTy->isObjCObjectPointerType()) {
+      if (IL->getValue() == 0) {
+        Out << "nil";
+        return false;
+      }
+    }
+    
+    Out << IL->getValue();
+    return false;
+  }
+  
+  return false;
+}
+
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
+                                  const BinaryOperator *BExpr,
+                                  const bool tookTrue,
+                                  BugReporterContext &BRC,
+                                  BugReport &R,
+                                  const ExplodedNode *N) {
+  
+  bool shouldInvert = false;
+  Optional<bool> shouldPrune;
+  
+  SmallString<128> LhsString, RhsString;
+  {
+    llvm::raw_svector_ostream OutLHS(LhsString), OutRHS(RhsString);
+    const bool isVarLHS = patternMatch(BExpr->getLHS(), OutLHS, BRC, R, N,
+                                       shouldPrune);
+    const bool isVarRHS = patternMatch(BExpr->getRHS(), OutRHS, BRC, R, N,
+                                       shouldPrune);
+    
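+    // Prefer to print a variable name first: invert the operands (and, below,
+    // the comparison operator) when only the RHS refers to a variable.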
+    shouldInvert = !isVarLHS && isVarRHS;    
+  }
+  
+  BinaryOperator::Opcode Op = BExpr->getOpcode();
+
+  if (BinaryOperator::isAssignmentOp(Op)) {
+    // For assignment operators, all that we care about is that the LHS
+    // evaluates to "true" or "false".
+    return VisitConditionVariable(LhsString, BExpr->getLHS(), tookTrue,
+                                  BRC, R, N);
+  }
+
+  // For non-assignment operations, we require that we can understand
+  // both the LHS and RHS.
+  if (LhsString.empty() || RhsString.empty())
+    return 0;
+  
+  // Should we invert the strings if the LHS is not a variable name?
+  SmallString<256> buf;
+  llvm::raw_svector_ostream Out(buf);
+  Out << "Assuming " << (shouldInvert ? RhsString : LhsString) << " is ";
+
+  // Do we need to invert the opcode?
+  if (shouldInvert)
+    switch (Op) {
+      default: break;
+      case BO_LT: Op = BO_GT; break;
+      case BO_GT: Op = BO_LT; break;
+      case BO_LE: Op = BO_GE; break;
+      case BO_GE: Op = BO_LE; break;
+    }
+
+  if (!tookTrue)
+    switch (Op) {
+      case BO_EQ: Op = BO_NE; break;
+      case BO_NE: Op = BO_EQ; break;
+      case BO_LT: Op = BO_GE; break;
+      case BO_GT: Op = BO_LE; break;
+      case BO_LE: Op = BO_GT; break;
+      case BO_GE: Op = BO_LT; break;
+      default:
+        return 0;
+    }
+  
+  switch (Op) {
+    case BO_EQ:
+      Out << "equal to ";
+      break;
+    case BO_NE:
+      Out << "not equal to ";
+      break;
+    default:
+      Out << BinaryOperator::getOpcodeStr(Op) << ' ';
+      break;
+  }
+  
+  Out << (shouldInvert ? LhsString : RhsString);
+  const LocationContext *LCtx = N->getLocationContext();
+  PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
+  PathDiagnosticEventPiece *event =
+    new PathDiagnosticEventPiece(Loc, Out.str());
+  if (shouldPrune.hasValue())
+    event->setPrunable(shouldPrune.getValue());
+  return event;
+}
+
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitConditionVariable(StringRef LhsString,
+                                           const Expr *CondVarExpr,
+                                           const bool tookTrue,
+                                           BugReporterContext &BRC,
+                                           BugReport &report,
+                                           const ExplodedNode *N) {
+  // FIXME: If there's already a constraint tracker for this variable,
+  // we shouldn't emit anything here (c.f. the double note in
+  // test/Analysis/inlining/path-notes.c)
+  SmallString<256> buf;
+  llvm::raw_svector_ostream Out(buf);
+  Out << "Assuming " << LhsString << " is ";
+  
+  QualType Ty = CondVarExpr->getType();
+
+  if (Ty->isPointerType())
+    Out << (tookTrue ? "not null" : "null");
+  else if (Ty->isObjCObjectPointerType())
+    Out << (tookTrue ? "not nil" : "nil");
+  else if (Ty->isBooleanType())
+    Out << (tookTrue ? "true" : "false");
+  else if (Ty->isIntegralOrEnumerationType())
+    Out << (tookTrue ? "non-zero" : "zero");
+  else
+    return 0;
+
+  const LocationContext *LCtx = N->getLocationContext();
+  PathDiagnosticLocation Loc(CondVarExpr, BRC.getSourceManager(), LCtx);
+  PathDiagnosticEventPiece *event =
+    new PathDiagnosticEventPiece(Loc, Out.str());
+
+  if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(CondVarExpr)) {
+    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+      const ProgramState *state = N->getState().getPtr();
+      if (const MemRegion *R = state->getLValue(VD, LCtx).getAsRegion()) {
+        if (report.isInteresting(R))
+          event->setPrunable(false);
+      }
+    }
+  }
+  
+  return event;
+}
+  
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
+                                  const DeclRefExpr *DR,
+                                  const bool tookTrue,
+                                  BugReporterContext &BRC,
+                                  BugReport &report,
+                                  const ExplodedNode *N) {
+
+  const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
+  if (!VD)
+    return 0;
+  
+  SmallString<256> Buf;
+  llvm::raw_svector_ostream Out(Buf);
+    
+  Out << "Assuming '";
+  VD->getDeclName().printName(Out);
+  Out << "' is ";
+    
+  QualType VDTy = VD->getType();
+  
+  if (VDTy->isPointerType())
+    Out << (tookTrue ? "non-null" : "null");
+  else if (VDTy->isObjCObjectPointerType())
+    Out << (tookTrue ? "non-nil" : "nil");
+  else if (VDTy->isScalarType())
+    Out << (tookTrue ? "not equal to 0" : "0");
+  else
+    return 0;
+  
+  const LocationContext *LCtx = N->getLocationContext();
+  PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
+  PathDiagnosticEventPiece *event =
+    new PathDiagnosticEventPiece(Loc, Out.str());
+  
+  const ProgramState *state = N->getState().getPtr();
+  if (const MemRegion *R = state->getLValue(VD, LCtx).getAsRegion()) {
+    if (report.isInteresting(R))
+      event->setPrunable(false);
+    else {
+      SVal V = state->getSVal(R);
+      if (report.isInteresting(V))
+        event->setPrunable(false);
+    }
+  }
+  return event;
+}
+
+
+// FIXME: Copied from ExprEngineCallAndReturn.cpp.
+static bool isInStdNamespace(const Decl *D) {
+  const DeclContext *DC = D->getDeclContext()->getEnclosingNamespaceContext();
+  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+  if (!ND)
+    return false;
+
+  while (const NamespaceDecl *Parent = dyn_cast<NamespaceDecl>(ND->getParent()))
+    ND = Parent;
+
+  return ND->getName() == "std";
+}
+
+
+PathDiagnosticPiece *
+LikelyFalsePositiveSuppressionBRVisitor::getEndPath(BugReporterContext &BRC,
+                                                    const ExplodedNode *N,
+                                                    BugReport &BR) {
+  // Here we suppress false positives coming from system headers. This list is
+  // based on known issues.
+
+  // Skip reports within the 'std' namespace. Although these can sometimes be
+  // the user's fault, we currently don't report them very well. Note that
+  // this will not help for any other data structure libraries, like TR1,
+  // Boost, or llvm/ADT.
+  ExprEngine &Eng = BRC.getBugReporter().getEngine();
+  AnalyzerOptions &Options = Eng.getAnalysisManager().options;
+  if (Options.shouldSuppressFromCXXStandardLibrary()) {
+    const LocationContext *LCtx = N->getLocationContext();
+    if (isInStdNamespace(LCtx->getDecl())) {
+      BR.markInvalid(getTag(), 0);
+      return 0;
+    }
+  }
+
+  // Skip reports within the sys/queue.h macros as we do not have the ability to
+  // reason about data structure shapes.
+  SourceManager &SM = BRC.getSourceManager();
+  FullSourceLoc Loc = BR.getLocation(SM).asLocation();
+  while (Loc.isMacroID()) {
+    if (SM.isInSystemMacro(Loc) &&
+       (SM.getFilename(SM.getSpellingLoc(Loc)).endswith("sys/queue.h"))) {
+      BR.markInvalid(getTag(), 0);
+      return 0;
+    }
+    Loc = Loc.getSpellingLoc();
+  }
+
+  return 0;
+}
+
+PathDiagnosticPiece *
+UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N,
+                                  const ExplodedNode *PrevN,
+                                  BugReporterContext &BRC,
+                                  BugReport &BR) {
+
+  ProgramStateRef State = N->getState();
+  ProgramPoint ProgLoc = N->getLocation();
+
+  // We are only interested in visiting CallEnter nodes.
+  Optional<CallEnter> CEnter = ProgLoc.getAs<CallEnter>();
+  if (!CEnter)
+    return 0;
+
+  // Check if one of the arguments is the region the visitor is tracking.
+  CallEventManager &CEMgr = BRC.getStateManager().getCallEventManager();
+  CallEventRef<> Call = CEMgr.getCaller(CEnter->getCalleeContext(), State);
+  unsigned Idx = 0;
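+  // Iterate over the formal parameters and the corresponding argument values
+  // in parallel.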
+  for (CallEvent::param_iterator I = Call->param_begin(),
+                                 E = Call->param_end(); I != E; ++I, ++Idx) {
+    const MemRegion *ArgReg = Call->getArgSVal(Idx).getAsRegion();
+
+    // Are we tracking the argument or its subregion?
+    if (!ArgReg || (ArgReg != R && !R->isSubRegionOf(ArgReg->StripCasts())))
+      continue;
+
+    // Check the function parameter type.
+    const ParmVarDecl *ParamDecl = *I;
+    assert(ParamDecl && "Formal parameter has no decl?");
+    QualType T = ParamDecl->getType();
+
+    if (!(T->isAnyPointerType() || T->isReferenceType())) {
+      // Function can only change the value passed in by address.
+      continue;
+    }
+    
+    // If it is a const pointer value, the function does not intend to
+    // change the value.
+    if (T->getPointeeType().isConstQualified())
+      continue;
+
+    // Mark the call site (LocationContext) as interesting if the value of the 
+    // argument is undefined or '0'/'NULL'.
+    SVal BoundVal = State->getSVal(R);
+    if (BoundVal.isUndef() || BoundVal.isZeroConstant()) {
+      BR.markInteresting(CEnter->getCalleeContext());
+      return 0;
+    }
+  }
+  return 0;
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/CMakeLists.txt b/safecode/tools/clang/lib/StaticAnalyzer/Core/CMakeLists.txt
new file mode 100644
index 0000000..91f15b3
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/CMakeLists.txt
@@ -0,0 +1,59 @@
+set(LLVM_LINK_COMPONENTS support)
+
+add_clang_library(clangStaticAnalyzerCore
+  APSIntType.cpp
+  AnalysisManager.cpp
+  AnalyzerOptions.cpp
+  BasicValueFactory.cpp
+  BlockCounter.cpp
+  BugReporter.cpp
+  BugReporterVisitors.cpp
+  CallEvent.cpp
+  Checker.cpp
+  CheckerContext.cpp
+  CheckerHelpers.cpp
+  CheckerManager.cpp
+  CheckerRegistry.cpp
+  ConstraintManager.cpp
+  CoreEngine.cpp
+  Environment.cpp
+  ExplodedGraph.cpp
+  ExprEngine.cpp
+  ExprEngineC.cpp
+  ExprEngineCXX.cpp
+  ExprEngineCallAndReturn.cpp
+  ExprEngineObjC.cpp
+  FunctionSummary.cpp
+  HTMLDiagnostics.cpp
+  MemRegion.cpp
+  PathDiagnostic.cpp
+  PlistDiagnostics.cpp
+  ProgramState.cpp
+  RangeConstraintManager.cpp
+  RegionStore.cpp
+  SValBuilder.cpp
+  SVals.cpp
+  SimpleConstraintManager.cpp
+  SimpleSValBuilder.cpp
+  Store.cpp
+  SubEngine.cpp
+  SymbolManager.cpp
+  TextPathDiagnostics.cpp
+  )
+
+add_dependencies(clangStaticAnalyzerCore
+  ClangAttrClasses
+  ClangAttrList
+  ClangCommentNodes
+  ClangDeclNodes
+  ClangDiagnosticCommon
+  ClangStmtNodes
+  )
+
+target_link_libraries(clangStaticAnalyzerCore
+  clangBasic
+  clangLex
+  clangAST
+  clangFrontend
+  clangRewriteCore
+  )
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
new file mode 100644
index 0000000..dfd20b8
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -0,0 +1,955 @@
+//===- Calls.cpp - Wrapper for all function and method calls ------*- C++ -*--//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file This file defines CallEvent and its subclasses, which represent path-
+/// sensitive instances of different kinds of function and method calls
+/// (C, C++, and Objective-C).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+QualType CallEvent::getResultType() const {
+  const Expr *E = getOriginExpr();
+  assert(E && "Calls without origin expressions do not have results");
+  QualType ResultTy = E->getType();
+
+  ASTContext &Ctx = getState()->getStateManager().getContext();
+
+  // A function that returns a reference to 'int' will have a result type
+  // of simply 'int'. Check the origin expr's value kind to recover the
+  // proper type.
+  switch (E->getValueKind()) {
+  case VK_LValue:
+    ResultTy = Ctx.getLValueReferenceType(ResultTy);
+    break;
+  case VK_XValue:
+    ResultTy = Ctx.getRValueReferenceType(ResultTy);
+    break;
+  case VK_RValue:
+    // No adjustment is necessary.
+    break;
+  }
+
+  return ResultTy;
+}
+
+static bool isCallbackArg(SVal V, QualType T) {
+  // If the parameter is 0, it's harmless.
+  if (V.isZeroConstant())
+    return false;
+
+  // If a parameter is a block or a callback, assume it can modify the pointer.
+  if (T->isBlockPointerType() ||
+      T->isFunctionPointerType() ||
+      T->isObjCSelType())
+    return true;
+
+  // Check if a callback is passed inside a struct (for both structs passed by
+  // reference and passed by value). Dig just one level into the struct for now.
+
+  if (T->isAnyPointerType() || T->isReferenceType())
+    T = T->getPointeeType();
+
+  if (const RecordType *RT = T->getAsStructureType()) {
+    const RecordDecl *RD = RT->getDecl();
+    for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+         I != E; ++I) {
+      QualType FieldT = I->getType();
+      if (FieldT->isBlockPointerType() || FieldT->isFunctionPointerType())
+        return true;
+    }
+  }
+
+  return false;
+}
+
+bool CallEvent::hasNonZeroCallbackArg() const {
+  unsigned NumOfArgs = getNumArgs();
+
+  // If the call is made through a function pointer, assume the function does
+  // not have a callback. TODO: We could check the types of the arguments here.
+  if (!getDecl())
+    return false;
+
+  unsigned Idx = 0;
+  for (CallEvent::param_type_iterator I = param_type_begin(),
+                                       E = param_type_end();
+       I != E && Idx < NumOfArgs; ++I, ++Idx) {
+    if (NumOfArgs <= Idx)
+      break;
+
+    if (isCallbackArg(getArgSVal(Idx), *I))
+      return true;
+  }
+  
+  return false;
+}
+
+bool CallEvent::isGlobalCFunction(StringRef FunctionName) const {
+  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(getDecl());
+  if (!FD)
+    return false;
+
+  return CheckerContext::isCLibraryFunction(FD, FunctionName);
+}
+
+/// \brief Returns true if a type is a pointer-to-const or reference-to-const
+/// with no further indirection.
+static bool isPointerToConst(QualType Ty) {
+  QualType PointeeTy = Ty->getPointeeType();
+  if (PointeeTy == QualType())
+    return false;
+  if (!PointeeTy.isConstQualified())
+    return false;
+  if (PointeeTy->isAnyPointerType())
+    return false;
+  return true;
+}
+
+// Try to retrieve the function declaration and find the function parameter
+// types which are pointers/references to a non-pointer const.
+// We will not invalidate the corresponding argument regions.
+static void findPtrToConstParams(llvm::SmallSet<unsigned, 4> &PreserveArgs,
+                                 const CallEvent &Call) {
+  unsigned Idx = 0;
+  for (CallEvent::param_type_iterator I = Call.param_type_begin(),
+                                      E = Call.param_type_end();
+       I != E; ++I, ++Idx) {
+    if (isPointerToConst(*I))
+      PreserveArgs.insert(Idx);
+  }
+}
+
+ProgramStateRef CallEvent::invalidateRegions(unsigned BlockCount,
+                                             ProgramStateRef Orig) const {
+  ProgramStateRef Result = (Orig ? Orig : getState());
+
+  SmallVector<SVal, 8> ConstValues;
+  SmallVector<SVal, 8> ValuesToInvalidate;
+
+  getExtraInvalidatedValues(ValuesToInvalidate);
+
+  // Indexes of arguments whose values will be preserved by the call.
+  llvm::SmallSet<unsigned, 4> PreserveArgs;
+  if (!argumentsMayEscape())
+    findPtrToConstParams(PreserveArgs, *this);
+
+  for (unsigned Idx = 0, Count = getNumArgs(); Idx != Count; ++Idx) {
+    // Mark this region for invalidation.  We batch invalidate regions
+    // below for efficiency.
+    if (PreserveArgs.count(Idx))
+      ConstValues.push_back(getArgSVal(Idx));
+    else
+      ValuesToInvalidate.push_back(getArgSVal(Idx));
+  }
+
+  // Invalidate designated regions using the batch invalidation API.
+  // NOTE: Even if RegionsToInvalidate is empty, we may still invalidate
+  //  global variables.
+  return Result->invalidateRegions(ValuesToInvalidate, getOriginExpr(),
+                                   BlockCount, getLocationContext(),
+                                   /*CausedByPointerEscape*/ true,
+                                   /*Symbols=*/0, this, ConstValues);
+}
+
+ProgramPoint CallEvent::getProgramPoint(bool IsPreVisit,
+                                        const ProgramPointTag *Tag) const {
+  if (const Expr *E = getOriginExpr()) {
+    if (IsPreVisit)
+      return PreStmt(E, getLocationContext(), Tag);
+    return PostStmt(E, getLocationContext(), Tag);
+  }
+
+  const Decl *D = getDecl();
+  assert(D && "Cannot get a program point without a statement or decl");  
+
+  SourceLocation Loc = getSourceRange().getBegin();
+  if (IsPreVisit)
+    return PreImplicitCall(D, Loc, getLocationContext(), Tag);
+  return PostImplicitCall(D, Loc, getLocationContext(), Tag);
+}
+
+SVal CallEvent::getArgSVal(unsigned Index) const {
+  const Expr *ArgE = getArgExpr(Index);
+  if (!ArgE)
+    return UnknownVal();
+  return getSVal(ArgE);
+}
+
+SourceRange CallEvent::getArgSourceRange(unsigned Index) const {
+  const Expr *ArgE = getArgExpr(Index);
+  if (!ArgE)
+    return SourceRange();
+  return ArgE->getSourceRange();
+}
+
+SVal CallEvent::getReturnValue() const {
+  const Expr *E = getOriginExpr();
+  if (!E)
+    return UndefinedVal();
+  return getSVal(E);
+}
+
+void CallEvent::dump() const {
+  dump(llvm::errs());
+}
+
+void CallEvent::dump(raw_ostream &Out) const {
+  ASTContext &Ctx = getState()->getStateManager().getContext();
+  if (const Expr *E = getOriginExpr()) {
+    E->printPretty(Out, 0, Ctx.getPrintingPolicy());
+    Out << "\n";
+    return;
+  }
+
+  if (const Decl *D = getDecl()) {
+    Out << "Call to ";
+    D->print(Out, Ctx.getPrintingPolicy());
+    return;
+  }
+
+  // FIXME: a string representation of the kind would be nice.
+  Out << "Unknown call (type " << getKind() << ")";
+}
+
+
+bool CallEvent::isCallStmt(const Stmt *S) {
+  return isa<CallExpr>(S) || isa<ObjCMessageExpr>(S)
+                          || isa<CXXConstructExpr>(S)
+                          || isa<CXXNewExpr>(S);
+}
+
+QualType CallEvent::getDeclaredResultType(const Decl *D) {
+  assert(D);
+  if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(D))
+    return FD->getResultType();
+  if (const ObjCMethodDecl* MD = dyn_cast<ObjCMethodDecl>(D))
+    return MD->getResultType();
+  if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+    // Blocks are difficult because the return type may not be stored in the
+    // BlockDecl itself. The AST should probably be enhanced, but for now we
+    // just do what we can.
+    QualType Ty = BD->getSignatureAsWritten()->getType();
+    if (const FunctionType *FT = Ty->getAs<FunctionType>())
+      if (!FT->getResultType()->isDependentType())
+        return FT->getResultType();
+
+    return QualType();
+  }
+  
+  return QualType();
+}
+
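+// Bind the values of the call's actual arguments to the corresponding formal
+// parameter regions in the callee's stack frame. Arguments whose values are
+// unknown are left unbound.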
+static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
+                                         CallEvent::BindingsTy &Bindings,
+                                         SValBuilder &SVB,
+                                         const CallEvent &Call,
+                                         CallEvent::param_iterator I,
+                                         CallEvent::param_iterator E) {
+  MemRegionManager &MRMgr = SVB.getRegionManager();
+
+  unsigned Idx = 0;
+  for (; I != E; ++I, ++Idx) {
+    const ParmVarDecl *ParamDecl = *I;
+    assert(ParamDecl && "Formal parameter has no decl?");
+
+    SVal ArgVal = Call.getArgSVal(Idx);
+    if (!ArgVal.isUnknown()) {
+      Loc ParamLoc = SVB.makeLoc(MRMgr.getVarRegion(ParamDecl, CalleeCtx));
+      Bindings.push_back(std::make_pair(ParamLoc, ArgVal));
+    }
+  }
+
+  // FIXME: Variadic arguments are not handled at all right now.
+}
+
+
+CallEvent::param_iterator AnyFunctionCall::param_begin() const {
+  const FunctionDecl *D = getDecl();
+  if (!D)
+    return 0;
+
+  return D->param_begin();
+}
+
+CallEvent::param_iterator AnyFunctionCall::param_end() const {
+  const FunctionDecl *D = getDecl();
+  if (!D)
+    return 0;
+
+  return D->param_end();
+}
+
+void AnyFunctionCall::getInitialStackFrameContents(
+                                        const StackFrameContext *CalleeCtx,
+                                        BindingsTy &Bindings) const {
+  const FunctionDecl *D = cast<FunctionDecl>(CalleeCtx->getDecl());
+  SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
+  addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
+                               D->param_begin(), D->param_end());
+}
+
+bool AnyFunctionCall::argumentsMayEscape() const {
+  if (hasNonZeroCallbackArg())
+    return true;
+
+  const FunctionDecl *D = getDecl();
+  if (!D)
+    return true;
+
+  const IdentifierInfo *II = D->getIdentifier();
+  if (!II)
+    return false;
+
+  // This set of "escaping" APIs is 
+
+  // - 'int pthread_setspecific(pthread_key_t k, const void *)' stores a
+  //   value into thread-local storage. The value can later be retrieved with
+  //   'void *pthread_getspecific(pthread_key_t)'. So even though the
+  //   parameter is 'const void *', the region escapes through the call.
+  if (II->isStr("pthread_setspecific"))
+    return true;
+
+  // - xpc_connection_set_context stores a value which can be retrieved later
+  //   with xpc_connection_get_context.
+  if (II->isStr("xpc_connection_set_context"))
+    return true;
+
+  // - funopen - sets a buffer for future IO calls.
+  if (II->isStr("funopen"))
+    return true;
+
+  StringRef FName = II->getName();
+
+  // - CoreFoundation functions that end with "NoCopy" can free a passed-in
+  //   buffer even if it is const.
+  if (FName.endswith("NoCopy"))
+    return true;
+
+  // - NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
+  //   be deallocated by NSMapRemove.
+  if (FName.startswith("NS") && (FName.find("Insert") != StringRef::npos))
+    return true;
+
+  // - Many CF containers allow objects to escape through custom
+  //   allocators/deallocators upon container construction. (PR12101)
+  if (FName.startswith("CF") || FName.startswith("CG")) {
+    return StrInStrNoCase(FName, "InsertValue")  != StringRef::npos ||
+           StrInStrNoCase(FName, "AddValue")     != StringRef::npos ||
+           StrInStrNoCase(FName, "SetValue")     != StringRef::npos ||
+           StrInStrNoCase(FName, "WithData")     != StringRef::npos ||
+           StrInStrNoCase(FName, "AppendValue")  != StringRef::npos ||
+           StrInStrNoCase(FName, "SetAttribute") != StringRef::npos;
+  }
+
+  return false;
+}
+
+
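+// Prefer the statically known callee; otherwise (e.g. for a call through a
+// function pointer) try to recover the declaration from the callee's
+// symbolic value.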
+const FunctionDecl *SimpleCall::getDecl() const {
+  const FunctionDecl *D = getOriginExpr()->getDirectCallee();
+  if (D)
+    return D;
+
+  return getSVal(getOriginExpr()->getCallee()).getAsFunctionDecl();
+}
+
+
+const FunctionDecl *CXXInstanceCall::getDecl() const {
+  const CallExpr *CE = cast_or_null<CallExpr>(getOriginExpr());
+  if (!CE)
+    return AnyFunctionCall::getDecl();
+
+  const FunctionDecl *D = CE->getDirectCallee();
+  if (D)
+    return D;
+
+  return getSVal(CE->getCallee()).getAsFunctionDecl();
+}
+
+void CXXInstanceCall::getExtraInvalidatedValues(ValueList &Values) const {
+  Values.push_back(getCXXThisVal());
+}
+
+SVal CXXInstanceCall::getCXXThisVal() const {
+  const Expr *Base = getCXXThisExpr();
+  // FIXME: This doesn't handle an overloaded ->* operator.
+  if (!Base)
+    return UnknownVal();
+
+  SVal ThisVal = getSVal(Base);
+  assert(ThisVal.isUnknownOrUndef() || ThisVal.getAs<Loc>());
+  return ThisVal;
+}
+
+
+RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const {
+  // Do we have a decl at all?
+  const Decl *D = getDecl();
+  if (!D)
+    return RuntimeDefinition();
+
+  // If the method is non-virtual, we know we can inline it.
+  const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
+  if (!MD->isVirtual())
+    return AnyFunctionCall::getRuntimeDefinition();
+
+  // Do we know the implicit 'this' object being called?
+  const MemRegion *R = getCXXThisVal().getAsRegion();
+  if (!R)
+    return RuntimeDefinition();
+
+  // Do we know anything about the type of 'this'?
+  DynamicTypeInfo DynType = getState()->getDynamicTypeInfo(R);
+  if (!DynType.isValid())
+    return RuntimeDefinition();
+
+  // Is the type a C++ class? (This is mostly a defensive check.)
+  QualType RegionType = DynType.getType()->getPointeeType();
+  assert(!RegionType.isNull() && "DynamicTypeInfo should always be a pointer.");
+
+  const CXXRecordDecl *RD = RegionType->getAsCXXRecordDecl();
+  if (!RD || !RD->hasDefinition())
+    return RuntimeDefinition();
+
+  // Find the decl for this method in that class.
+  const CXXMethodDecl *Result = MD->getCorrespondingMethodInClass(RD, true);
+  if (!Result) {
+    // We might not even get the original statically-resolved method due to
+    // some particularly nasty casting (e.g. casts to sister classes).
+    // However, we should at least be able to search up and down our own class
+    // hierarchy, and some real bugs have been caught by checking this.
+    assert(!RD->isDerivedFrom(MD->getParent()) && "Couldn't find known method");
+    
+    // FIXME: This is checking that our DynamicTypeInfo is at least as good as
+    // the static type. However, because we currently don't update
+    // DynamicTypeInfo when an object is cast, we can't actually be sure the
+    // DynamicTypeInfo is up to date. This assert should be re-enabled once
+    // this is fixed. <rdar://problem/12287087>
+    //assert(!MD->getParent()->isDerivedFrom(RD) && "Bad DynamicTypeInfo");
+
+    return RuntimeDefinition();
+  }
+
+  // Does the decl that we found have an implementation?
+  const FunctionDecl *Definition;
+  if (!Result->hasBody(Definition))
+    return RuntimeDefinition();
+
+  // We found a definition. If we're not sure that this devirtualization is
+  // actually what will happen at runtime, make sure to provide the region so
+  // that ExprEngine can decide what to do with it.
+  if (DynType.canBeASubClass())
+    return RuntimeDefinition(Definition, R->StripCasts());
+  return RuntimeDefinition(Definition, /*DispatchRegion=*/0);
+}
+
+void CXXInstanceCall::getInitialStackFrameContents(
+                                            const StackFrameContext *CalleeCtx,
+                                            BindingsTy &Bindings) const {
+  AnyFunctionCall::getInitialStackFrameContents(CalleeCtx, Bindings);
+
+  // Handle the binding of 'this' in the new stack frame.
+  SVal ThisVal = getCXXThisVal();
+  if (!ThisVal.isUnknown()) {
+    ProgramStateManager &StateMgr = getState()->getStateManager();
+    SValBuilder &SVB = StateMgr.getSValBuilder();
+
+    const CXXMethodDecl *MD = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+    Loc ThisLoc = SVB.getCXXThis(MD, CalleeCtx);
+
+    // If we devirtualized to a different member function, we need to make sure
+    // we have the proper layering of CXXBaseObjectRegions.
+    if (MD->getCanonicalDecl() != getDecl()->getCanonicalDecl()) {
+      ASTContext &Ctx = SVB.getContext();
+      const CXXRecordDecl *Class = MD->getParent();
+      QualType Ty = Ctx.getPointerType(Ctx.getRecordType(Class));
+
+      // FIXME: CallEvent maybe shouldn't be directly accessing StoreManager.
+      bool Failed;
+      ThisVal = StateMgr.getStoreManager().evalDynamicCast(ThisVal, Ty, Failed);
+      assert(!Failed && "Calling an incorrectly devirtualized method");
+    }
+
+    if (!ThisVal.isUnknown())
+      Bindings.push_back(std::make_pair(ThisLoc, ThisVal));
+  }
+}
+
+
+
+const Expr *CXXMemberCall::getCXXThisExpr() const {
+  return getOriginExpr()->getImplicitObjectArgument();
+}
+
+RuntimeDefinition CXXMemberCall::getRuntimeDefinition() const {
+  // C++11 [expr.call]p1: ...If the selected function is non-virtual, or if the
+  // id-expression in the class member access expression is a qualified-id,
+  // that function is called. Otherwise, its final overrider in the dynamic type
+  // of the object expression is called.
+  if (const MemberExpr *ME = dyn_cast<MemberExpr>(getOriginExpr()->getCallee()))
+    if (ME->hasQualifier())
+      return AnyFunctionCall::getRuntimeDefinition();
+  
+  return CXXInstanceCall::getRuntimeDefinition();
+}
+
+
+const Expr *CXXMemberOperatorCall::getCXXThisExpr() const {
+  return getOriginExpr()->getArg(0);
+}
+
+
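+// The callee expression of a block call evaluates to the region of the block
+// literal (a BlockDataRegion).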
+const BlockDataRegion *BlockCall::getBlockRegion() const {
+  const Expr *Callee = getOriginExpr()->getCallee();
+  const MemRegion *DataReg = getSVal(Callee).getAsRegion();
+
+  return dyn_cast_or_null<BlockDataRegion>(DataReg);
+}
+
+CallEvent::param_iterator BlockCall::param_begin() const {
+  const BlockDecl *D = getBlockDecl();
+  if (!D)
+    return 0;
+  return D->param_begin();
+}
+
+CallEvent::param_iterator BlockCall::param_end() const {
+  const BlockDecl *D = getBlockDecl();
+  if (!D)
+    return 0;
+  return D->param_end();
+}
+
+void BlockCall::getExtraInvalidatedValues(ValueList &Values) const {
+  // FIXME: This also needs to invalidate captured globals.
+  if (const MemRegion *R = getBlockRegion())
+    Values.push_back(loc::MemRegionVal(R));
+}
+
+void BlockCall::getInitialStackFrameContents(const StackFrameContext *CalleeCtx,
+                                             BindingsTy &Bindings) const {
+  const BlockDecl *D = cast<BlockDecl>(CalleeCtx->getDecl());
+  SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
+  addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
+                               D->param_begin(), D->param_end());
+}
+
+
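+// For constructor calls, 'Data' holds the region of the object being
+// constructed, if known.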
+SVal CXXConstructorCall::getCXXThisVal() const {
+  if (Data)
+    return loc::MemRegionVal(static_cast<const MemRegion *>(Data));
+  return UnknownVal();
+}
+
+void CXXConstructorCall::getExtraInvalidatedValues(ValueList &Values) const {
+  if (Data)
+    Values.push_back(loc::MemRegionVal(static_cast<const MemRegion *>(Data)));
+}
+
+void CXXConstructorCall::getInitialStackFrameContents(
+                                             const StackFrameContext *CalleeCtx,
+                                             BindingsTy &Bindings) const {
+  AnyFunctionCall::getInitialStackFrameContents(CalleeCtx, Bindings);
+
+  SVal ThisVal = getCXXThisVal();
+  if (!ThisVal.isUnknown()) {
+    SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
+    const CXXMethodDecl *MD = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+    Loc ThisLoc = SVB.getCXXThis(MD, CalleeCtx);
+    Bindings.push_back(std::make_pair(ThisLoc, ThisVal));
+  }
+}
+
+
+
+SVal CXXDestructorCall::getCXXThisVal() const {
+  if (Data)
+    return loc::MemRegionVal(DtorDataTy::getFromOpaqueValue(Data).getPointer());
+  return UnknownVal();
+}
+
+RuntimeDefinition CXXDestructorCall::getRuntimeDefinition() const {
+  // Base destructors are always called non-virtually.
+  // Skip CXXInstanceCall's devirtualization logic in this case.
+  if (isBaseDestructor())
+    return AnyFunctionCall::getRuntimeDefinition();
+
+  return CXXInstanceCall::getRuntimeDefinition();
+}
+
+
+CallEvent::param_iterator ObjCMethodCall::param_begin() const {
+  const ObjCMethodDecl *D = getDecl();
+  if (!D)
+    return 0;
+
+  return D->param_begin();
+}
+
+CallEvent::param_iterator ObjCMethodCall::param_end() const {
+  const ObjCMethodDecl *D = getDecl();
+  if (!D)
+    return 0;
+
+  return D->param_end();
+}
+
+void
+ObjCMethodCall::getExtraInvalidatedValues(ValueList &Values) const {
+  Values.push_back(getReceiverSVal());
+}
+
+SVal ObjCMethodCall::getSelfSVal() const {
+  const LocationContext *LCtx = getLocationContext();
+  const ImplicitParamDecl *SelfDecl = LCtx->getSelfDecl();
+  if (!SelfDecl)
+    return SVal();
+  return getState()->getSVal(getState()->getRegion(SelfDecl, LCtx));
+}
+
+SVal ObjCMethodCall::getReceiverSVal() const {
+  // FIXME: Is this the best way to handle class receivers?
+  if (!isInstanceMessage())
+    return UnknownVal();
+    
+  if (const Expr *RecE = getOriginExpr()->getInstanceReceiver())
+    return getSVal(RecE);
+
+  // An instance message with no expression means we are sending to super.
+  // In this case the object reference is the same as 'self'.
+  assert(getOriginExpr()->getReceiverKind() == ObjCMessageExpr::SuperInstance);
+  SVal SelfVal = getSelfSVal();
+  assert(SelfVal.isValid() && "Calling super but not in ObjC method");
+  return SelfVal;
+}
+
+bool ObjCMethodCall::isReceiverSelfOrSuper() const {
+  if (getOriginExpr()->getReceiverKind() == ObjCMessageExpr::SuperInstance ||
+      getOriginExpr()->getReceiverKind() == ObjCMessageExpr::SuperClass)
+      return true;
+
+  if (!isInstanceMessage())
+    return false;
+
+  SVal RecVal = getSVal(getOriginExpr()->getInstanceReceiver());
+
+  return (RecVal == getSelfSVal());
+}
+
+SourceRange ObjCMethodCall::getSourceRange() const {
+  switch (getMessageKind()) {
+  case OCM_Message:
+    return getOriginExpr()->getSourceRange();
+  case OCM_PropertyAccess:
+  case OCM_Subscript:
+    return getContainingPseudoObjectExpr()->getSourceRange();
+  }
+  llvm_unreachable("unknown message kind");
+}
+
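+// The opaque 'Data' field packs a pointer to the enclosing PseudoObjectExpr
+// together with the ObjCMessageKind in the low bits; a null pointer marks an
+// ordinary message send.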
+typedef llvm::PointerIntPair<const PseudoObjectExpr *, 2> ObjCMessageDataTy;
+
+const PseudoObjectExpr *ObjCMethodCall::getContainingPseudoObjectExpr() const {
+  assert(Data != 0 && "Lazy lookup not yet performed.");
+  assert(getMessageKind() != OCM_Message && "Explicit message send.");
+  return ObjCMessageDataTy::getFromOpaqueValue(Data).getPointer();
+}
+
+ObjCMessageKind ObjCMethodCall::getMessageKind() const {
+  if (Data == 0) {
+    ParentMap &PM = getLocationContext()->getParentMap();
+    const Stmt *S = PM.getParent(getOriginExpr());
+    if (const PseudoObjectExpr *POE = dyn_cast_or_null<PseudoObjectExpr>(S)) {
+      const Expr *Syntactic = POE->getSyntacticForm();
+
+      // This handles the funny case of assigning to the result of a getter.
+      // This can happen if the getter returns a non-const reference.
+      if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(Syntactic))
+        Syntactic = BO->getLHS();
+
+      ObjCMessageKind K;
+      switch (Syntactic->getStmtClass()) {
+      case Stmt::ObjCPropertyRefExprClass:
+        K = OCM_PropertyAccess;
+        break;
+      case Stmt::ObjCSubscriptRefExprClass:
+        K = OCM_Subscript;
+        break;
+      default:
+        // FIXME: Can this ever happen?
+        K = OCM_Message;
+        break;
+      }
+
+      if (K != OCM_Message) {
+        const_cast<ObjCMethodCall *>(this)->Data
+          = ObjCMessageDataTy(POE, K).getOpaqueValue();
+        assert(getMessageKind() == K);
+        return K;
+      }
+    }
+    
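+    // Cache a null PseudoObjectExpr with a nonzero tag so that 'Data' becomes
+    // non-null; a null pointer in the cache is interpreted as OCM_Message.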
+    const_cast<ObjCMethodCall *>(this)->Data
+      = ObjCMessageDataTy(0, 1).getOpaqueValue();
+    assert(getMessageKind() == OCM_Message);
+    return OCM_Message;
+  }
+
+  ObjCMessageDataTy Info = ObjCMessageDataTy::getFromOpaqueValue(Data);
+  if (!Info.getPointer())
+    return OCM_Message;
+  return static_cast<ObjCMessageKind>(Info.getInt());
+}
+
+
+bool ObjCMethodCall::canBeOverridenInSubclass(ObjCInterfaceDecl *IDecl,
+                                             Selector Sel) const {
+  assert(IDecl);
+  const SourceManager &SM =
+    getState()->getStateManager().getContext().getSourceManager();
+
+  // If the class interface is declared inside the main file, assume it is not
+  // subclassed.
+  // TODO: It could actually be subclassed if the subclass is private as well.
+  // This is probably very rare.
+  SourceLocation InterfLoc = IDecl->getEndOfDefinitionLoc();
+  if (InterfLoc.isValid() && SM.isFromMainFile(InterfLoc))
+    return false;
+
+  // Assume that property accessors are not overridden.
+  if (getMessageKind() == OCM_PropertyAccess)
+    return false;
+
+  // We assume that if the method is public (declared outside of the main
+  // file), or has a parent class that publicly declares the method, then the
+  // method could be overridden in a subclass.
+
+  // Find the first declaration in the class hierarchy that declares
+  // the selector.
+  ObjCMethodDecl *D = 0;
+  while (true) {
+    D = IDecl->lookupMethod(Sel, true);
+
+    // Cannot find a public definition.
+    if (!D)
+      return false;
+
+    // If the method is declared outside the main file, it is visible to
+    // subclasses and may be overridden.
+    if (D->getLocation().isValid() && !SM.isFromMainFile(D->getLocation()))
+      return true;
+
+    if (D->isOverriding()) {
+      // Search in the superclass on the next iteration.
+      IDecl = D->getClassInterface();
+      if (!IDecl)
+        return false;
+
+      IDecl = IDecl->getSuperClass();
+      if (!IDecl)
+        return false;
+
+      continue;
+    }
+
+    return false;
+  }
+
+  llvm_unreachable("The while loop should always terminate.");
+}
+
+RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
+  const ObjCMessageExpr *E = getOriginExpr();
+  assert(E);
+  Selector Sel = E->getSelector();
+
+  if (E->isInstanceMessage()) {
+
+    // Find the receiver type.
+    const ObjCObjectPointerType *ReceiverT = 0;
+    bool CanBeSubClassed = false;
+    QualType SupersType = E->getSuperType();
+    const MemRegion *Receiver = 0;
+
+    if (!SupersType.isNull()) {
+      // 'super' always refers to the immediate superclass of the class
+      // containing the method where the call occurs.
+      ReceiverT = cast<ObjCObjectPointerType>(SupersType);
+    } else {
+      Receiver = getReceiverSVal().getAsRegion();
+      if (!Receiver)
+        return RuntimeDefinition();
+
+      DynamicTypeInfo DTI = getState()->getDynamicTypeInfo(Receiver);
+      QualType DynType = DTI.getType();
+      CanBeSubClassed = DTI.canBeASubClass();
+      ReceiverT = dyn_cast<ObjCObjectPointerType>(DynType);
+
+      if (ReceiverT && CanBeSubClassed)
+        if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterfaceDecl())
+          if (!canBeOverridenInSubclass(IDecl, Sel))
+            CanBeSubClassed = false;
+    }
+
+    // Lookup the method implementation.
+    if (ReceiverT)
+      if (ObjCInterfaceDecl *IDecl = ReceiverT->getInterfaceDecl()) {
+        // Repeatedly calling lookupPrivateMethod() is expensive, especially
+        // since in many cases it returns null.  We cache the results so
+        // that repeated queries on the same ObjCInterfaceDecl and Selector
+        // don't incur the same cost.  On some test cases, we can see the
+        // same query being issued thousands of times.
+        //
+        // NOTE: This cache is essentially a "global" variable, but it
+        // only gets lazily created when we get here.  The value of the
+        // cache probably comes from it being global across ExprEngines,
+        // where the same queries may get issued.  If we are worried about
+        // concurrency, or possibly loading/unloading ASTs, etc., we may
+        // need to revisit this someday.  In terms of memory, this table
+        // stays around until clang quits, which also may be bad if we
+        // need to release memory.
+        typedef std::pair<const ObjCInterfaceDecl*, Selector>
+                PrivateMethodKey;
+        typedef llvm::DenseMap<PrivateMethodKey,
+                               Optional<const ObjCMethodDecl *> >
+                PrivateMethodCache;
+
+        static PrivateMethodCache PMC;
+        Optional<const ObjCMethodDecl *> &Val = PMC[std::make_pair(IDecl, Sel)];
+
+        // Query lookupPrivateMethod() if the cache does not hit.
+        if (!Val.hasValue())
+          Val = IDecl->lookupPrivateMethod(Sel);
+
+        const ObjCMethodDecl *MD = Val.getValue();
+        if (CanBeSubClassed)
+          return RuntimeDefinition(MD, Receiver);
+        else
+          return RuntimeDefinition(MD, 0);
+      }
+
+  } else {
+    // This is a class method.
+    // If we have type info for the receiver class, we are calling via
+    // class name.
+    if (ObjCInterfaceDecl *IDecl = E->getReceiverInterface()) {
+      // Find/Return the method implementation.
+      return RuntimeDefinition(IDecl->lookupPrivateClassMethod(Sel));
+    }
+  }
+
+  return RuntimeDefinition();
+}
+
+void ObjCMethodCall::getInitialStackFrameContents(
+                                             const StackFrameContext *CalleeCtx,
+                                             BindingsTy &Bindings) const {
+  const ObjCMethodDecl *D = cast<ObjCMethodDecl>(CalleeCtx->getDecl());
+  SValBuilder &SVB = getState()->getStateManager().getSValBuilder();
+  addParameterValuesToBindings(CalleeCtx, Bindings, SVB, *this,
+                               D->param_begin(), D->param_end());
+
+  SVal SelfVal = getReceiverSVal();
+  if (!SelfVal.isUnknown()) {
+    const VarDecl *SelfD = CalleeCtx->getAnalysisDeclContext()->getSelfDecl();
+    MemRegionManager &MRMgr = SVB.getRegionManager();
+    Loc SelfLoc = SVB.makeLoc(MRMgr.getVarRegion(SelfD, CalleeCtx));
+    Bindings.push_back(std::make_pair(SelfLoc, SelfVal));
+  }
+}
+
+CallEventRef<>
+CallEventManager::getSimpleCall(const CallExpr *CE, ProgramStateRef State,
+                                const LocationContext *LCtx) {
+  if (const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE))
+    return create<CXXMemberCall>(MCE, State, LCtx);
+
+  if (const CXXOperatorCallExpr *OpCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
+    const FunctionDecl *DirectCallee = OpCE->getDirectCallee();
+    if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DirectCallee))
+      if (MD->isInstance())
+        return create<CXXMemberOperatorCall>(OpCE, State, LCtx);
+
+  } else if (CE->getCallee()->getType()->isBlockPointerType()) {
+    return create<BlockCall>(CE, State, LCtx);
+  }
+
+  // Otherwise, it's a normal function call, static member function call, or
+  // something we can't reason about.
+  return create<FunctionCall>(CE, State, LCtx);
+}
+
+
+CallEventRef<>
+CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
+                            ProgramStateRef State) {
+  const LocationContext *ParentCtx = CalleeCtx->getParent();
+  const LocationContext *CallerCtx = ParentCtx->getCurrentStackFrame();
+  assert(CallerCtx && "This should not be used for top-level stack frames");
+
+  const Stmt *CallSite = CalleeCtx->getCallSite();
+
+  if (CallSite) {
+    if (const CallExpr *CE = dyn_cast<CallExpr>(CallSite))
+      return getSimpleCall(CE, State, CallerCtx);
+
+    switch (CallSite->getStmtClass()) {
+    case Stmt::CXXConstructExprClass:
+    case Stmt::CXXTemporaryObjectExprClass: {
+      SValBuilder &SVB = State->getStateManager().getSValBuilder();
+      const CXXMethodDecl *Ctor = cast<CXXMethodDecl>(CalleeCtx->getDecl());
+      Loc ThisPtr = SVB.getCXXThis(Ctor, CalleeCtx);
+      SVal ThisVal = State->getSVal(ThisPtr);
+
+      return getCXXConstructorCall(cast<CXXConstructExpr>(CallSite),
+                                   ThisVal.getAsRegion(), State, CallerCtx);
+    }
+    case Stmt::CXXNewExprClass:
+      return getCXXAllocatorCall(cast<CXXNewExpr>(CallSite), State, CallerCtx);
+    case Stmt::ObjCMessageExprClass:
+      return getObjCMethodCall(cast<ObjCMessageExpr>(CallSite),
+                               State, CallerCtx);
+    default:
+      llvm_unreachable("This is not an inlineable statement.");
+    }
+  }
+
+  // Fall back to the CFG. The only thing we haven't handled yet is
+  // destructors, though this could change in the future.
+  const CFGBlock *B = CalleeCtx->getCallSiteBlock();
+  CFGElement E = (*B)[CalleeCtx->getIndex()];
+  assert(E.getAs<CFGImplicitDtor>() &&
+         "All other CFG elements should have exprs");
+  assert(!E.getAs<CFGTemporaryDtor>() && "We don't handle temporaries yet");
+
+  SValBuilder &SVB = State->getStateManager().getSValBuilder();
+  const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CalleeCtx->getDecl());
+  Loc ThisPtr = SVB.getCXXThis(Dtor, CalleeCtx);
+  SVal ThisVal = State->getSVal(ThisPtr);
+
+  const Stmt *Trigger;
+  if (Optional<CFGAutomaticObjDtor> AutoDtor = E.getAs<CFGAutomaticObjDtor>())
+    Trigger = AutoDtor->getTriggerStmt();
+  else
+    Trigger = Dtor->getBody();
+
+  return getCXXDestructorCall(Dtor, Trigger, ThisVal.getAsRegion(),
+                              E.getAs<CFGBaseDtor>().hasValue(), State,
+                              CallerCtx);
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp
new file mode 100644
index 0000000..07e0aac
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp
@@ -0,0 +1,31 @@
+//== Checker.cpp - Registration mechanism for checkers -----------*- C++ -*--=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines Checker, used to create and register checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+
+using namespace clang;
+using namespace ento;
+
+StringRef CheckerBase::getTagDescription() const {
+  // FIXME: We want to return the package + name of the checker here.
+  return "A Checker";  
+}
+
+void Checker<check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+             check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+             check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+             check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+             check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+             check::_VoidCheck, check::_VoidCheck, check::_VoidCheck
+             >::anchor() { }
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
new file mode 100644
index 0000000..74eeef1
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -0,0 +1,98 @@
+//== CheckerContext.cpp - Context info for path-sensitive checkers-----------=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines CheckerContext that provides contextual info for
+//  path-sensitive checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Lex/Lexer.h"
+
+using namespace clang;
+using namespace ento;
+
+const FunctionDecl *CheckerContext::getCalleeDecl(const CallExpr *CE) const {
+  ProgramStateRef State = getState();
+  const Expr *Callee = CE->getCallee();
+  SVal L = State->getSVal(Callee, Pred->getLocationContext());
+  return L.getAsFunctionDecl();
+}
+
+StringRef CheckerContext::getCalleeName(const FunctionDecl *FunDecl) const {
+  if (!FunDecl)
+    return StringRef();
+  IdentifierInfo *funI = FunDecl->getIdentifier();
+  if (!funI)
+    return StringRef();
+  return funI->getName();
+}
+
+
+bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
+                                        StringRef Name) {
+  // To avoid false positives (Ex: finding user defined functions with
+  // similar names), only perform fuzzy name matching when it's a builtin.
+  // Using a string compare is slow; we might want to switch on BuiltinID here.
+  unsigned BId = FD->getBuiltinID();
+  if (BId != 0) {
+    if (Name.empty())
+      return true;
+    StringRef BName = FD->getASTContext().BuiltinInfo.GetName(BId);
+    if (BName.find(Name) != StringRef::npos)
+      return true;
+  }
+
+  const IdentifierInfo *II = FD->getIdentifier();
+  // If this is a special C++ name without IdentifierInfo, it can't be a
+  // C library function.
+  if (!II)
+    return false;
+
+  // Look through 'extern "C"' and anything similar invented in the future.
+  const DeclContext *DC = FD->getDeclContext();
+  while (DC->isTransparentContext())
+    DC = DC->getParent();
+
+  // If this function is in a namespace, it is not a C library function.
+  if (!DC->isTranslationUnit())
+    return false;
+
+  // If this function is not externally visible, it is not a C library function.
+  // Note that we make an exception for inline functions, which may be
+  // declared in header files without external linkage.
+  if (!FD->isInlined() && FD->getLinkage() != ExternalLinkage)
+    return false;
+
+  if (Name.empty())
+    return true;
+
+  StringRef FName = II->getName();
+  if (FName.equals(Name))
+    return true;
+
+  if (FName.startswith("__inline") && (FName.find(Name) != StringRef::npos))
+    return true;
+
+  if (FName.startswith("__") && FName.endswith("_chk") &&
+      FName.find(Name) != StringRef::npos)
+    return true;
+
+  return false;
+}
+
+StringRef CheckerContext::getMacroNameOrSpelling(SourceLocation &Loc) {
+  if (Loc.isMacroID())
+    return Lexer::getImmediateMacroName(Loc, getSourceManager(),
+                                             getLangOpts());
+  SmallVector<char, 16> buf;
+  return Lexer::getSpelling(Loc, buf, getSourceManager(), getLangOpts());
+}
+
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
new file mode 100644
index 0000000..28df695
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -0,0 +1,80 @@
+//===---- CheckerHelpers.cpp - Helper functions for checkers ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines several static functions for use in checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/AST/Expr.h"
+
+// Recursively find any substatements containing macros
+bool clang::ento::containsMacro(const Stmt *S) {
+  if (S->getLocStart().isMacroID())
+    return true;
+
+  if (S->getLocEnd().isMacroID())
+    return true;
+
+  for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+      ++I)
+    if (const Stmt *child = *I)
+      if (containsMacro(child))
+        return true;
+
+  return false;
+}
+
+// Recursively find any substatements containing enum constants
+bool clang::ento::containsEnum(const Stmt *S) {
+  const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
+
+  if (DR && isa<EnumConstantDecl>(DR->getDecl()))
+    return true;
+
+  for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+      ++I)
+    if (const Stmt *child = *I)
+      if (containsEnum(child))
+        return true;
+
+  return false;
+}
+
+// Recursively find any substatements containing static vars
+bool clang::ento::containsStaticLocal(const Stmt *S) {
+  const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
+
+  if (DR)
+    if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl()))
+      if (VD->isStaticLocal())
+        return true;
+
+  for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+      ++I)
+    if (const Stmt *child = *I)
+      if (containsStaticLocal(child))
+        return true;
+
+  return false;
+}
+
+// Recursively find any substatements containing __builtin_offsetof
+bool clang::ento::containsBuiltinOffsetOf(const Stmt *S) {
+  if (isa<OffsetOfExpr>(S))
+    return true;
+
+  for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+      ++I)
+    if (const Stmt *child = *I)
+      if (containsBuiltinOffsetOf(child))
+        return true;
+
+  return false;
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
new file mode 100644
index 0000000..8adf326
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -0,0 +1,717 @@
+//===--- CheckerManager.cpp - Static Analyzer Checker Manager -------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the Static Analyzer Checker Manager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+bool CheckerManager::hasPathSensitiveCheckers() const {
+  return !StmtCheckers.empty()              ||
+         !PreObjCMessageCheckers.empty()    ||
+         !PostObjCMessageCheckers.empty()   ||
+         !PreCallCheckers.empty()    ||
+         !PostCallCheckers.empty()   ||
+         !LocationCheckers.empty()          ||
+         !BindCheckers.empty()              ||
+         !EndAnalysisCheckers.empty()       ||
+         !EndFunctionCheckers.empty()           ||
+         !BranchConditionCheckers.empty()   ||
+         !LiveSymbolsCheckers.empty()       ||
+         !DeadSymbolsCheckers.empty()       ||
+         !RegionChangesCheckers.empty()     ||
+         !EvalAssumeCheckers.empty()        ||
+         !EvalCallCheckers.empty();
+}
+
+void CheckerManager::finishedCheckerRegistration() {
+#ifndef NDEBUG
+  // Make sure that for every event that has listeners, there is at least
+  // one dispatcher registered for it.
+  for (llvm::DenseMap<EventTag, EventInfo>::iterator
+         I = Events.begin(), E = Events.end(); I != E; ++I)
+    assert(I->second.HasDispatcher && "No dispatcher registered for an event");
+#endif
+}
+
+//===----------------------------------------------------------------------===//
+// Functions for running checkers for AST traversal.
+//===----------------------------------------------------------------------===//
+
+void CheckerManager::runCheckersOnASTDecl(const Decl *D, AnalysisManager& mgr,
+                                          BugReporter &BR) {
+  assert(D);
+
+  unsigned DeclKind = D->getKind();
+  CachedDeclCheckers *checkers = 0;
+  CachedDeclCheckersMapTy::iterator CCI = CachedDeclCheckersMap.find(DeclKind);
+  if (CCI != CachedDeclCheckersMap.end()) {
+    checkers = &(CCI->second);
+  } else {
+    // Find the checkers that should run for this Decl and cache them.
+    checkers = &CachedDeclCheckersMap[DeclKind];
+    for (unsigned i = 0, e = DeclCheckers.size(); i != e; ++i) {
+      DeclCheckerInfo &info = DeclCheckers[i];
+      if (info.IsForDeclFn(D))
+        checkers->push_back(info.CheckFn);
+    }
+  }
+
+  assert(checkers);
+  for (CachedDeclCheckers::iterator
+         I = checkers->begin(), E = checkers->end(); I != E; ++I)
+    (*I)(D, mgr, BR);
+}
+
+void CheckerManager::runCheckersOnASTBody(const Decl *D, AnalysisManager& mgr,
+                                          BugReporter &BR) {
+  assert(D && D->hasBody());
+
+  for (unsigned i = 0, e = BodyCheckers.size(); i != e; ++i)
+    BodyCheckers[i](D, mgr, BR);
+}
+
+//===----------------------------------------------------------------------===//
+// Functions for running checkers for path-sensitive checking.
+//===----------------------------------------------------------------------===//
+
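+// Run each checker in 'checkCtx' over every node in 'Src', threading the
+// successors produced by one checker into the next and accumulating the
+// final set of nodes into 'Dst'.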
+template <typename CHECK_CTX>
+static void expandGraphWithCheckers(CHECK_CTX checkCtx,
+                                    ExplodedNodeSet &Dst,
+                                    const ExplodedNodeSet &Src) {
+  const NodeBuilderContext &BldrCtx = checkCtx.Eng.getBuilderContext();
+  if (Src.empty())
+    return;
+
+  typename CHECK_CTX::CheckersTy::const_iterator
+      I = checkCtx.checkers_begin(), E = checkCtx.checkers_end();
+  if (I == E) {
+    Dst.insert(Src);
+    return;
+  }
+
+  ExplodedNodeSet Tmp1, Tmp2;
+  const ExplodedNodeSet *PrevSet = &Src;
+
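+  // Alternate between Tmp1 and Tmp2 as scratch sets so that each checker
+  // consumes the previous checker's output; the last checker writes its
+  // successors directly into Dst.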
+  for (; I != E; ++I) {
+    ExplodedNodeSet *CurrSet = 0;
+    if (I+1 == E)
+      CurrSet = &Dst;
+    else {
+      CurrSet = (PrevSet == &Tmp1) ? &Tmp2 : &Tmp1;
+      CurrSet->clear();
+    }
+
+    NodeBuilder B(*PrevSet, *CurrSet, BldrCtx);
+    for (ExplodedNodeSet::iterator NI = PrevSet->begin(), NE = PrevSet->end();
+         NI != NE; ++NI) {
+      checkCtx.runChecker(*I, B, *NI);
+    }
+
+    // If all the produced transitions are sinks, stop.
+    if (CurrSet->empty())
+      return;
+
+    // Update which NodeSet is the current one.
+    PrevSet = CurrSet;
+  }
+}
+
+namespace {
+  struct CheckStmtContext {
+    typedef SmallVectorImpl<CheckerManager::CheckStmtFunc> CheckersTy;
+    bool IsPreVisit;
+    const CheckersTy &Checkers;
+    const Stmt *S;
+    ExprEngine &Eng;
+    bool WasInlined;
+
+    CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+    CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+    CheckStmtContext(bool isPreVisit, const CheckersTy &checkers,
+                     const Stmt *s, ExprEngine &eng, bool wasInlined = false)
+      : IsPreVisit(isPreVisit), Checkers(checkers), S(s), Eng(eng),
+        WasInlined(wasInlined) {}
+
+    void runChecker(CheckerManager::CheckStmtFunc checkFn,
+                    NodeBuilder &Bldr, ExplodedNode *Pred) {
+      // FIXME: Remove respondsToCallback from CheckerContext;
+      ProgramPoint::Kind K =  IsPreVisit ? ProgramPoint::PreStmtKind :
+                                           ProgramPoint::PostStmtKind;
+      const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
+                                Pred->getLocationContext(), checkFn.Checker);
+      CheckerContext C(Bldr, Eng, Pred, L, WasInlined);
+      checkFn(S, C);
+    }
+  };
+}
+
+/// \brief Run checkers for visiting Stmts.
+void CheckerManager::runCheckersForStmt(bool isPreVisit,
+                                        ExplodedNodeSet &Dst,
+                                        const ExplodedNodeSet &Src,
+                                        const Stmt *S,
+                                        ExprEngine &Eng,
+                                        bool WasInlined) {
+  CheckStmtContext C(isPreVisit, *getCachedStmtCheckersFor(S, isPreVisit),
+                     S, Eng, WasInlined);
+  expandGraphWithCheckers(C, Dst, Src);
+}
+
+namespace {
+  struct CheckObjCMessageContext {
+    typedef std::vector<CheckerManager::CheckObjCMessageFunc> CheckersTy;
+    bool IsPreVisit, WasInlined;
+    const CheckersTy &Checkers;
+    const ObjCMethodCall &Msg;
+    ExprEngine &Eng;
+
+    CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+    CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+    CheckObjCMessageContext(bool isPreVisit, const CheckersTy &checkers,
+                            const ObjCMethodCall &msg, ExprEngine &eng,
+                            bool wasInlined)
+      : IsPreVisit(isPreVisit), WasInlined(wasInlined), Checkers(checkers),
+        Msg(msg), Eng(eng) { }
+
+    void runChecker(CheckerManager::CheckObjCMessageFunc checkFn,
+                    NodeBuilder &Bldr, ExplodedNode *Pred) {
+      const ProgramPoint &L = Msg.getProgramPoint(IsPreVisit,checkFn.Checker);
+      CheckerContext C(Bldr, Eng, Pred, L, WasInlined);
+
+      checkFn(*Msg.cloneWithState<ObjCMethodCall>(Pred->getState()), C);
+    }
+  };
+}
+
+/// \brief Run checkers for visiting obj-c messages.
+void CheckerManager::runCheckersForObjCMessage(bool isPreVisit,
+                                               ExplodedNodeSet &Dst,
+                                               const ExplodedNodeSet &Src,
+                                               const ObjCMethodCall &msg,
+                                               ExprEngine &Eng,
+                                               bool WasInlined) {
+  CheckObjCMessageContext C(isPreVisit,
+                            isPreVisit ? PreObjCMessageCheckers
+                                       : PostObjCMessageCheckers,
+                            msg, Eng, WasInlined);
+  expandGraphWithCheckers(C, Dst, Src);
+}
+
+namespace {
+  // FIXME: This has all the same signatures as CheckObjCMessageContext.
+  // Is there a way we can merge the two?
+  struct CheckCallContext {
+    typedef std::vector<CheckerManager::CheckCallFunc> CheckersTy;
+    bool IsPreVisit, WasInlined;
+    const CheckersTy &Checkers;
+    const CallEvent &Call;
+    ExprEngine &Eng;
+
+    CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+    CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+    CheckCallContext(bool isPreVisit, const CheckersTy &checkers,
+                     const CallEvent &call, ExprEngine &eng,
+                     bool wasInlined)
+    : IsPreVisit(isPreVisit), WasInlined(wasInlined), Checkers(checkers),
+      Call(call), Eng(eng) { }
+
+    void runChecker(CheckerManager::CheckCallFunc checkFn,
+                    NodeBuilder &Bldr, ExplodedNode *Pred) {
+      const ProgramPoint &L = Call.getProgramPoint(IsPreVisit,checkFn.Checker);
+      CheckerContext C(Bldr, Eng, Pred, L, WasInlined);
+
+      checkFn(*Call.cloneWithState(Pred->getState()), C);
+    }
+  };
+}
+
+/// \brief Run checkers for visiting an abstract call event.
+void CheckerManager::runCheckersForCallEvent(bool isPreVisit,
+                                             ExplodedNodeSet &Dst,
+                                             const ExplodedNodeSet &Src,
+                                             const CallEvent &Call,
+                                             ExprEngine &Eng,
+                                             bool WasInlined) {
+  CheckCallContext C(isPreVisit,
+                     isPreVisit ? PreCallCheckers
+                                : PostCallCheckers,
+                     Call, Eng, WasInlined);
+  expandGraphWithCheckers(C, Dst, Src);
+}
+
+namespace {
+  struct CheckLocationContext {
+    typedef std::vector<CheckerManager::CheckLocationFunc> CheckersTy;
+    const CheckersTy &Checkers;
+    SVal Loc;
+    bool IsLoad;
+    const Stmt *NodeEx; /* Will become a CFGStmt */
+    const Stmt *BoundEx;
+    ExprEngine &Eng;
+
+    CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+    CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+    CheckLocationContext(const CheckersTy &checkers,
+                         SVal loc, bool isLoad, const Stmt *NodeEx,
+                         const Stmt *BoundEx,
+                         ExprEngine &eng)
+      : Checkers(checkers), Loc(loc), IsLoad(isLoad), NodeEx(NodeEx),
+        BoundEx(BoundEx), Eng(eng) {}
+
+    void runChecker(CheckerManager::CheckLocationFunc checkFn,
+                    NodeBuilder &Bldr, ExplodedNode *Pred) {
+      ProgramPoint::Kind K =  IsLoad ? ProgramPoint::PreLoadKind :
+                                       ProgramPoint::PreStoreKind;
+      const ProgramPoint &L =
+        ProgramPoint::getProgramPoint(NodeEx, K,
+                                      Pred->getLocationContext(),
+                                      checkFn.Checker);
+      CheckerContext C(Bldr, Eng, Pred, L);
+      checkFn(Loc, IsLoad, BoundEx, C);
+    }
+  };
+}
+
+/// \brief Run checkers for load/store of a location.
+void CheckerManager::runCheckersForLocation(ExplodedNodeSet &Dst,
+                                            const ExplodedNodeSet &Src,
+                                            SVal location, bool isLoad,
+                                            const Stmt *NodeEx,
+                                            const Stmt *BoundEx,
+                                            ExprEngine &Eng) {
+  CheckLocationContext C(LocationCheckers, location, isLoad, NodeEx,
+                         BoundEx, Eng);
+  expandGraphWithCheckers(C, Dst, Src);
+}
+
+namespace {
+  struct CheckBindContext {
+    typedef std::vector<CheckerManager::CheckBindFunc> CheckersTy;
+    const CheckersTy &Checkers;
+    SVal Loc;
+    SVal Val;
+    const Stmt *S;
+    ExprEngine &Eng;
+    const ProgramPoint &PP;
+
+    CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+    CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+    CheckBindContext(const CheckersTy &checkers,
+                     SVal loc, SVal val, const Stmt *s, ExprEngine &eng,
+                     const ProgramPoint &pp)
+      : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng), PP(pp) {}
+
+    void runChecker(CheckerManager::CheckBindFunc checkFn,
+                    NodeBuilder &Bldr, ExplodedNode *Pred) {
+      const ProgramPoint &L = PP.withTag(checkFn.Checker);
+      CheckerContext C(Bldr, Eng, Pred, L);
+
+      checkFn(Loc, Val, S, C);
+    }
+  };
+}
+
+/// \brief Run checkers for binding of a value to a location.
+void CheckerManager::runCheckersForBind(ExplodedNodeSet &Dst,
+                                        const ExplodedNodeSet &Src,
+                                        SVal location, SVal val,
+                                        const Stmt *S, ExprEngine &Eng,
+                                        const ProgramPoint &PP) {
+  CheckBindContext C(BindCheckers, location, val, S, Eng, PP);
+  expandGraphWithCheckers(C, Dst, Src);
+}
+
+void CheckerManager::runCheckersForEndAnalysis(ExplodedGraph &G,
+                                               BugReporter &BR,
+                                               ExprEngine &Eng) {
+  for (unsigned i = 0, e = EndAnalysisCheckers.size(); i != e; ++i)
+    EndAnalysisCheckers[i](G, BR, Eng);
+}
+
+/// \brief Run checkers for end of path.
+// Note: we do not chain the checker output (like in expandGraphWithCheckers)
+// for this callback since end of path nodes are expected to be final.
+void CheckerManager::runCheckersForEndFunction(NodeBuilderContext &BC,
+                                               ExplodedNodeSet &Dst,
+                                               ExplodedNode *Pred,
+                                               ExprEngine &Eng) {
+  
+  // We define the builder outside of the loop because if at least one checker
+  // creates a successor for Pred, we do not need to generate an
+  // auto-transition for it.
+  NodeBuilder Bldr(Pred, Dst, BC);
+  for (unsigned i = 0, e = EndFunctionCheckers.size(); i != e; ++i) {
+    CheckEndFunctionFunc checkFn = EndFunctionCheckers[i];
+
+    const ProgramPoint &L = BlockEntrance(BC.Block,
+                                          Pred->getLocationContext(),
+                                          checkFn.Checker);
+    CheckerContext C(Bldr, Eng, Pred, L);
+    checkFn(C);
+  }
+}
+
+namespace {
+  struct CheckBranchConditionContext {
+    typedef std::vector<CheckerManager::CheckBranchConditionFunc> CheckersTy;
+    const CheckersTy &Checkers;
+    const Stmt *Condition;
+    ExprEngine &Eng;
+
+    CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+    CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+    CheckBranchConditionContext(const CheckersTy &checkers,
+                                const Stmt *Cond, ExprEngine &eng)
+      : Checkers(checkers), Condition(Cond), Eng(eng) {}
+
+    void runChecker(CheckerManager::CheckBranchConditionFunc checkFn,
+                    NodeBuilder &Bldr, ExplodedNode *Pred) {
+      ProgramPoint L = PostCondition(Condition, Pred->getLocationContext(),
+                                     checkFn.Checker);
+      CheckerContext C(Bldr, Eng, Pred, L);
+      checkFn(Condition, C);
+    }
+  };
+}
+
+/// \brief Run checkers for branch condition.
+void CheckerManager::runCheckersForBranchCondition(const Stmt *Condition,
+                                                   ExplodedNodeSet &Dst,
+                                                   ExplodedNode *Pred,
+                                                   ExprEngine &Eng) {
+  ExplodedNodeSet Src;
+  Src.insert(Pred);
+  CheckBranchConditionContext C(BranchConditionCheckers, Condition, Eng);
+  expandGraphWithCheckers(C, Dst, Src);
+}
+
+/// \brief Run checkers for live symbols.
+void CheckerManager::runCheckersForLiveSymbols(ProgramStateRef state,
+                                               SymbolReaper &SymReaper) {
+  for (unsigned i = 0, e = LiveSymbolsCheckers.size(); i != e; ++i)
+    LiveSymbolsCheckers[i](state, SymReaper);
+}
+
+namespace {
+  struct CheckDeadSymbolsContext {
+    typedef std::vector<CheckerManager::CheckDeadSymbolsFunc> CheckersTy;
+    const CheckersTy &Checkers;
+    SymbolReaper &SR;
+    const Stmt *S;
+    ExprEngine &Eng;
+    ProgramPoint::Kind ProgramPointKind;
+
+    CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+    CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+    CheckDeadSymbolsContext(const CheckersTy &checkers, SymbolReaper &sr,
+                            const Stmt *s, ExprEngine &eng,
+                            ProgramPoint::Kind K)
+      : Checkers(checkers), SR(sr), S(s), Eng(eng), ProgramPointKind(K) { }
+
+    void runChecker(CheckerManager::CheckDeadSymbolsFunc checkFn,
+                    NodeBuilder &Bldr, ExplodedNode *Pred) {
+      const ProgramPoint &L = ProgramPoint::getProgramPoint(S, ProgramPointKind,
+                                Pred->getLocationContext(), checkFn.Checker);
+      CheckerContext C(Bldr, Eng, Pred, L);
+
+      // Note: do not pass the statement to the checkers without letting them
+      // differentiate whether remove-dead-bindings ran before or after the
+      // statement.
+      checkFn(SR, C);
+    }
+  };
+}
+
+/// \brief Run checkers for dead symbols.
+void CheckerManager::runCheckersForDeadSymbols(ExplodedNodeSet &Dst,
+                                               const ExplodedNodeSet &Src,
+                                               SymbolReaper &SymReaper,
+                                               const Stmt *S,
+                                               ExprEngine &Eng,
+                                               ProgramPoint::Kind K) {
+  CheckDeadSymbolsContext C(DeadSymbolsCheckers, SymReaper, S, Eng, K);
+  expandGraphWithCheckers(C, Dst, Src);
+}
+
+/// \brief True if at least one checker wants to check region changes.
+bool CheckerManager::wantsRegionChangeUpdate(ProgramStateRef state) {
+  for (unsigned i = 0, e = RegionChangesCheckers.size(); i != e; ++i)
+    if (RegionChangesCheckers[i].WantUpdateFn(state))
+      return true;
+
+  return false;
+}
+
+/// \brief Run checkers for region changes.
+ProgramStateRef 
+CheckerManager::runCheckersForRegionChanges(ProgramStateRef state,
+                                    const InvalidatedSymbols *invalidated,
+                                    ArrayRef<const MemRegion *> ExplicitRegions,
+                                    ArrayRef<const MemRegion *> Regions,
+                                    const CallEvent *Call) {
+  for (unsigned i = 0, e = RegionChangesCheckers.size(); i != e; ++i) {
+    // If any checker declares the state infeasible (or if it starts that way),
+    // bail out.
+    if (!state)
+      return NULL;
+    state = RegionChangesCheckers[i].CheckFn(state, invalidated, 
+                                             ExplicitRegions, Regions, Call);
+  }
+  return state;
+}
+
+/// \brief Run checkers to process symbol escape event.
+ProgramStateRef
+CheckerManager::runCheckersForPointerEscape(ProgramStateRef State,
+                                           const InvalidatedSymbols &Escaped,
+                                           const CallEvent *Call,
+                                           PointerEscapeKind Kind,
+                                           bool IsConst) {
+  assert((Call != NULL ||
+          (Kind != PSK_DirectEscapeOnCall &&
+           Kind != PSK_IndirectEscapeOnCall)) &&
+         "Call must not be NULL when escaping on call");
+  for (unsigned i = 0, e = PointerEscapeCheckers.size(); i != e; ++i) {
+    // If any checker declares the state infeasible (or if it starts that
+    // way), bail out.
+    if (!State)
+      return NULL;
+    State = PointerEscapeCheckers[i](State, Escaped, Call, Kind, IsConst);
+  }
+  return State;
+}
+
+/// \brief Run checkers for handling assumptions on symbolic values.
+ProgramStateRef 
+CheckerManager::runCheckersForEvalAssume(ProgramStateRef state,
+                                         SVal Cond, bool Assumption) {
+  for (unsigned i = 0, e = EvalAssumeCheckers.size(); i != e; ++i) {
+    // If any checker declares the state infeasible (or if it starts that way),
+    // bail out.
+    if (!state)
+      return NULL;
+    state = EvalAssumeCheckers[i](state, Cond, Assumption);
+  }
+  return state;
+}
+
+/// \brief Run checkers for evaluating a call.
+/// Only one checker will evaluate the call.
+void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
+                                            const ExplodedNodeSet &Src,
+                                            const CallEvent &Call,
+                                            ExprEngine &Eng) {
+  const CallExpr *CE = cast<CallExpr>(Call.getOriginExpr());
+  for (ExplodedNodeSet::iterator
+         NI = Src.begin(), NE = Src.end(); NI != NE; ++NI) {
+    ExplodedNode *Pred = *NI;
+    bool anyEvaluated = false;
+
+    ExplodedNodeSet checkDst;
+    NodeBuilder B(Pred, checkDst, Eng.getBuilderContext());
+
+    // Check if any of the EvalCall callbacks can evaluate the call.
+    for (std::vector<EvalCallFunc>::iterator
+           EI = EvalCallCheckers.begin(), EE = EvalCallCheckers.end();
+         EI != EE; ++EI) {
+      ProgramPoint::Kind K = ProgramPoint::PostStmtKind;
+      const ProgramPoint &L = ProgramPoint::getProgramPoint(CE, K,
+                                Pred->getLocationContext(), EI->Checker);
+      bool evaluated = false;
+      { // CheckerContext generates transitions (populates checkDst) on
+        // destruction, so introduce the scope to make sure it gets properly
+        // populated.
+        CheckerContext C(B, Eng, Pred, L);
+        evaluated = (*EI)(CE, C);
+      }
+      assert(!(evaluated && anyEvaluated)
+             && "There are more than one checkers evaluating the call");
+      if (evaluated) {
+        anyEvaluated = true;
+        Dst.insert(checkDst);
+#ifdef NDEBUG
+        break; // on release don't check that no other checker also evals.
+#endif
+      }
+    }
+    
+    // If none of the checkers evaluated the call, ask ExprEngine to handle it.
+    if (!anyEvaluated) {
+      NodeBuilder B(Pred, Dst, Eng.getBuilderContext());
+      Eng.defaultEvalCall(B, Pred, Call);
+    }
+  }
+}
+
+/// \brief Run checkers for the entire Translation Unit.
+void CheckerManager::runCheckersOnEndOfTranslationUnit(
+                                                  const TranslationUnitDecl *TU,
+                                                  AnalysisManager &mgr,
+                                                  BugReporter &BR) {
+  for (unsigned i = 0, e = EndOfTranslationUnitCheckers.size(); i != e; ++i)
+    EndOfTranslationUnitCheckers[i](TU, mgr, BR);
+}
+
+void CheckerManager::runCheckersForPrintState(raw_ostream &Out,
+                                              ProgramStateRef State,
+                                              const char *NL, const char *Sep) {
+  for (llvm::DenseMap<CheckerTag, CheckerRef>::iterator
+        I = CheckerTags.begin(), E = CheckerTags.end(); I != E; ++I)
+    I->second->printState(Out, State, NL, Sep);
+}
+
+//===----------------------------------------------------------------------===//
+// Internal registration functions for AST traversing.
+//===----------------------------------------------------------------------===//
+
+void CheckerManager::_registerForDecl(CheckDeclFunc checkfn,
+                                      HandlesDeclFunc isForDeclFn) {
+  DeclCheckerInfo info = { checkfn, isForDeclFn };
+  DeclCheckers.push_back(info);
+}
+
+void CheckerManager::_registerForBody(CheckDeclFunc checkfn) {
+  BodyCheckers.push_back(checkfn);
+}
+
+//===----------------------------------------------------------------------===//
+// Internal registration functions for path-sensitive checking.
+//===----------------------------------------------------------------------===//
+
+void CheckerManager::_registerForPreStmt(CheckStmtFunc checkfn,
+                                         HandlesStmtFunc isForStmtFn) {
+  StmtCheckerInfo info = { checkfn, isForStmtFn, /*IsPreVisit*/true };
+  StmtCheckers.push_back(info);
+}
+void CheckerManager::_registerForPostStmt(CheckStmtFunc checkfn,
+                                          HandlesStmtFunc isForStmtFn) {
+  StmtCheckerInfo info = { checkfn, isForStmtFn, /*IsPreVisit*/false };
+  StmtCheckers.push_back(info);
+}
+
+void CheckerManager::_registerForPreObjCMessage(CheckObjCMessageFunc checkfn) {
+  PreObjCMessageCheckers.push_back(checkfn);
+}
+void CheckerManager::_registerForPostObjCMessage(CheckObjCMessageFunc checkfn) {
+  PostObjCMessageCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForPreCall(CheckCallFunc checkfn) {
+  PreCallCheckers.push_back(checkfn);
+}
+void CheckerManager::_registerForPostCall(CheckCallFunc checkfn) {
+  PostCallCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForLocation(CheckLocationFunc checkfn) {
+  LocationCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForBind(CheckBindFunc checkfn) {
+  BindCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForEndAnalysis(CheckEndAnalysisFunc checkfn) {
+  EndAnalysisCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForEndFunction(CheckEndFunctionFunc checkfn) {
+  EndFunctionCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForBranchCondition(
+                                             CheckBranchConditionFunc checkfn) {
+  BranchConditionCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForLiveSymbols(CheckLiveSymbolsFunc checkfn) {
+  LiveSymbolsCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForDeadSymbols(CheckDeadSymbolsFunc checkfn) {
+  DeadSymbolsCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForRegionChanges(CheckRegionChangesFunc checkfn,
+                                     WantsRegionChangeUpdateFunc wantUpdateFn) {
+  RegionChangesCheckerInfo info = {checkfn, wantUpdateFn};
+  RegionChangesCheckers.push_back(info);
+}
+
+void CheckerManager::_registerForPointerEscape(CheckPointerEscapeFunc checkfn){
+  PointerEscapeCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForConstPointerEscape(
+                                          CheckPointerEscapeFunc checkfn) {
+  PointerEscapeCheckers.push_back(checkfn);
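+  // Note: const-escape callbacks share the same list as ordinary
+  // pointer-escape callbacks; the IsConst flag forwarded by
+  // runCheckersForPointerEscape is what tells the two events apart.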
+}
+
+void CheckerManager::_registerForEvalAssume(EvalAssumeFunc checkfn) {
+  EvalAssumeCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForEvalCall(EvalCallFunc checkfn) {
+  EvalCallCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForEndOfTranslationUnit(
+                                            CheckEndOfTranslationUnit checkfn) {
+  EndOfTranslationUnitCheckers.push_back(checkfn);
+}
+
+//===----------------------------------------------------------------------===//
+// Implementation details.
+//===----------------------------------------------------------------------===//
+
+CheckerManager::CachedStmtCheckers *
+CheckerManager::getCachedStmtCheckersFor(const Stmt *S, bool isPreVisit) {
+  assert(S);
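+  // The relevant checkers are computed once per (statement class, pre/post)
+  // pair and memoized, so repeated visits of the same kind of statement do
+  // not rescan StmtCheckers.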
+
+  CachedStmtCheckersKey key(S->getStmtClass(), isPreVisit);
+  CachedStmtCheckers *checkers = 0;
+  CachedStmtCheckersMapTy::iterator CCI = CachedStmtCheckersMap.find(key);
+  if (CCI != CachedStmtCheckersMap.end()) {
+    checkers = &(CCI->second);
+  } else {
+    // Find the checkers that should run for this Stmt and cache them.
+    checkers = &CachedStmtCheckersMap[key];
+    for (unsigned i = 0, e = StmtCheckers.size(); i != e; ++i) {
+      StmtCheckerInfo &info = StmtCheckers[i];
+      if (info.IsPreVisit == isPreVisit && info.IsForStmtFn(S))
+        checkers->push_back(info.CheckFn);
+    }
+  }
+
+  assert(checkers);
+  return checkers;
+}
+
+CheckerManager::~CheckerManager() {
+  for (unsigned i = 0, e = CheckerDtors.size(); i != e; ++i)
+    CheckerDtors[i]();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
new file mode 100644
index 0000000..4729903
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
@@ -0,0 +1,151 @@
+//===--- CheckerRegistry.cpp - Maintains all available checkers -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
+#include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static const char PackageSeparator = '.';
+typedef llvm::SetVector<const CheckerRegistry::CheckerInfo *> CheckerInfoSet;
+
+
+static bool checkerNameLT(const CheckerRegistry::CheckerInfo &a,
+                          const CheckerRegistry::CheckerInfo &b) {
+  return a.FullName < b.FullName;
+}
+
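+// Illustrative examples: the package name "core" matches the checker "core"
+// itself as well as "core.DivideZero" or "core.uninitialized.Assign", while
+// "core.Div" does not match "core.DivideZero" because the character after
+// the prefix is not the package separator.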
+static bool isInPackage(const CheckerRegistry::CheckerInfo &checker,
+                        StringRef packageName) {
+  // Does the checker's full name have the package as a prefix?
+  if (!checker.FullName.startswith(packageName))
+    return false;
+
+  // Is the package actually just the name of a specific checker?
+  if (checker.FullName.size() == packageName.size())
+    return true;
+
+  // Is the checker in the package (or a subpackage)?
+  if (checker.FullName[packageName.size()] == PackageSeparator)
+    return true;
+
+  return false;
+}
+
+static void collectCheckers(const CheckerRegistry::CheckerInfoList &checkers,
+                            const llvm::StringMap<size_t> &packageSizes,
+                            CheckerOptInfo &opt, CheckerInfoSet &collected) {
+  // Use a binary search to find the possible start of the package.
+  CheckerRegistry::CheckerInfo packageInfo(NULL, opt.getName(), "");
+  CheckerRegistry::CheckerInfoList::const_iterator e = checkers.end();
+  CheckerRegistry::CheckerInfoList::const_iterator i =
+    std::lower_bound(checkers.begin(), e, packageInfo, checkerNameLT);
+
+  // If we didn't even find a possible package, give up.
+  if (i == e)
+    return;
+
+  // If what we found doesn't actually start the package, give up.
+  if (!isInPackage(*i, opt.getName()))
+    return;
+
+  // There is at least one checker in the package; claim the option.
+  opt.claim();
+
+  // See how large the package is.
+  // If the package doesn't exist, assume the option refers to a single checker.
+  size_t size = 1;
+  llvm::StringMap<size_t>::const_iterator packageSize =
+    packageSizes.find(opt.getName());
+  if (packageSize != packageSizes.end())
+    size = packageSize->getValue();
+
+  // Step through all the checkers in the package.
+  for (e = i+size; i != e; ++i) {
+    if (opt.isEnabled())
+      collected.insert(&*i);
+    else
+      collected.remove(&*i);
+  }
+}
+
+void CheckerRegistry::addChecker(InitializationFunction fn, StringRef name,
+                                 StringRef desc) {
+  Checkers.push_back(CheckerInfo(fn, name, desc));
+
+  // Record the presence of the checker in its packages.
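+  // For example, registering "alpha.core.BoolAssignment" increments the
+  // counts of both the "alpha.core" and "alpha" packages.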
+  StringRef packageName, leafName;
+  llvm::tie(packageName, leafName) = name.rsplit(PackageSeparator);
+  while (!leafName.empty()) {
+    Packages[packageName] += 1;
+    llvm::tie(packageName, leafName) = packageName.rsplit(PackageSeparator);
+  }
+}
+
+void CheckerRegistry::initializeManager(CheckerManager &checkerMgr, 
+                                  SmallVectorImpl<CheckerOptInfo> &opts) const {
+  // Sort checkers for efficient collection.
+  std::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
+
+  // Collect checkers enabled by the options.
+  CheckerInfoSet enabledCheckers;
+  for (SmallVectorImpl<CheckerOptInfo>::iterator
+         i = opts.begin(), e = opts.end(); i != e; ++i) {
+    collectCheckers(Checkers, Packages, *i, enabledCheckers);
+  }
+
+  // Initialize the CheckerManager with all enabled checkers.
+  for (CheckerInfoSet::iterator
+         i = enabledCheckers.begin(), e = enabledCheckers.end(); i != e; ++i) {
+    (*i)->Initialize(checkerMgr);
+  }
+}
+
+void CheckerRegistry::printHelp(raw_ostream &out,
+                                size_t maxNameChars) const {
+  // FIXME: Alphabetical sort puts 'experimental' in the middle.
+  // Would it be better to name it '~experimental' or something else
+  // that's ASCIIbetically last?
+  std::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
+
+  // FIXME: Print available packages.
+
+  out << "CHECKERS:\n";
+
+  // Find the maximum option length.
+  size_t optionFieldWidth = 0;
+  for (CheckerInfoList::const_iterator i = Checkers.begin(), e = Checkers.end();
+       i != e; ++i) {
+    // Limit the amount of padding we are willing to give up for alignment.
+    //   Package.Name     Description  [Hidden]
+    size_t nameLength = i->FullName.size();
+    if (nameLength <= maxNameChars)
+      optionFieldWidth = std::max(optionFieldWidth, nameLength);
+  }
+
+  const size_t initialPad = 2;
+  for (CheckerInfoList::const_iterator i = Checkers.begin(), e = Checkers.end();
+       i != e; ++i) {
+    out.indent(initialPad) << i->FullName;
+
+    int pad = optionFieldWidth - i->FullName.size();
+
+    // Break on long option names.
+    if (pad < 0) {
+      out << '\n';
+      pad = optionFieldWidth + initialPad;
+    }
+    out.indent(pad + 2) << i->Desc;
+
+    out << '\n';
+  }
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp
new file mode 100644
index 0000000..4dec526
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp
@@ -0,0 +1,39 @@
+//== ConstraintManager.cpp - Constraints on symbolic values -----*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the interface to manage constraints on symbolic values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+using namespace clang;
+using namespace ento;
+
+ConstraintManager::~ConstraintManager() {}
+
+static DefinedSVal getLocFromSymbol(const ProgramStateRef &State,
+                                    SymbolRef Sym) {
+  const MemRegion *R = State->getStateManager().getRegionManager()
+                                               .getSymbolicRegion(Sym);
+  return loc::MemRegionVal(R);
+}
+
+ConditionTruthVal ConstraintManager::checkNull(ProgramStateRef State,
+                                               SymbolRef Sym) {  
+  QualType Ty = Sym->getType();
+  DefinedSVal V = Loc::isLocType(Ty) ? getLocFromSymbol(State, Sym)
+                                     : nonloc::SymbolVal(Sym);
+  const ProgramStatePair &P = assumeDual(State, V);
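+  // P.first is the state in which V is assumed true (a non-null location),
+  // P.second the state in which it is assumed false; if exactly one of the
+  // two is feasible, the nullness of the symbol is known definitively.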
+  if (P.first && !P.second)
+    return ConditionTruthVal(false);
+  if (!P.first && P.second)
+    return ConditionTruthVal(true);
+  return ConditionTruthVal();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
new file mode 100644
index 0000000..b09b2c2
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -0,0 +1,708 @@
+//==- CoreEngine.cpp - Path-Sensitive Dataflow Engine ------------*- C++ -*-//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines a generic engine for intraprocedural, path-sensitive
+//  dataflow analysis via a graph reachability engine.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "CoreEngine"
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/Casting.h"
+
+using namespace clang;
+using namespace ento;
+
+STATISTIC(NumSteps,
+            "The # of steps executed.");
+STATISTIC(NumReachedMaxSteps,
+            "The # of times we reached the max number of steps.");
+STATISTIC(NumPathsExplored,
+            "The # of paths explored by the analyzer.");
+
+//===----------------------------------------------------------------------===//
+// Worklist classes for exploration of reachable states.
+//===----------------------------------------------------------------------===//
+
+WorkList::Visitor::~Visitor() {}
+
+namespace {
+class DFS : public WorkList {
+  SmallVector<WorkListUnit,20> Stack;
+public:
+  virtual bool hasWork() const {
+    return !Stack.empty();
+  }
+
+  virtual void enqueue(const WorkListUnit& U) {
+    Stack.push_back(U);
+  }
+
+  virtual WorkListUnit dequeue() {
+    assert (!Stack.empty());
+    const WorkListUnit& U = Stack.back();
+    Stack.pop_back(); // This technically "invalidates" U, but we are fine.
+    return U;
+  }
+  
+  virtual bool visitItemsInWorkList(Visitor &V) {
+    for (SmallVectorImpl<WorkListUnit>::iterator
+         I = Stack.begin(), E = Stack.end(); I != E; ++I) {
+      if (V.visit(*I))
+        return true;
+    }
+    return false;
+  }
+};
+
+class BFS : public WorkList {
+  std::deque<WorkListUnit> Queue;
+public:
+  virtual bool hasWork() const {
+    return !Queue.empty();
+  }
+
+  virtual void enqueue(const WorkListUnit& U) {
+    Queue.push_back(U);
+  }
+
+  virtual WorkListUnit dequeue() {
+    WorkListUnit U = Queue.front();
+    Queue.pop_front();
+    return U;
+  }
+  
+  virtual bool visitItemsInWorkList(Visitor &V) {
+    for (std::deque<WorkListUnit>::iterator
+         I = Queue.begin(), E = Queue.end(); I != E; ++I) {
+      if (V.visit(*I))
+        return true;
+    }
+    return false;
+  }
+};
+
+} // end anonymous namespace
+
+// Place the destructor for WorkList here because it contains virtual member
+// functions, and we want the code for the destructor generated in a single
+// compilation unit.
+WorkList::~WorkList() {}
+
+WorkList *WorkList::makeDFS() { return new DFS(); }
+WorkList *WorkList::makeBFS() { return new BFS(); }
+
+namespace {
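+  // A hybrid worklist: per the class name, the intent is to visit block
+  // entrances in breadth-first order (via Queue) while processing the
+  // contents of each basic block depth-first (via Stack).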
+  class BFSBlockDFSContents : public WorkList {
+    std::deque<WorkListUnit> Queue;
+    SmallVector<WorkListUnit,20> Stack;
+  public:
+    virtual bool hasWork() const {
+      return !Queue.empty() || !Stack.empty();
+    }
+
+    virtual void enqueue(const WorkListUnit& U) {
+      if (U.getNode()->getLocation().getAs<BlockEntrance>())
+        Queue.push_front(U);
+      else
+        Stack.push_back(U);
+    }
+
+    virtual WorkListUnit dequeue() {
+      // Process all basic blocks to completion.
+      if (!Stack.empty()) {
+        const WorkListUnit& U = Stack.back();
+        Stack.pop_back(); // This technically "invalidates" U, but we are fine.
+        return U;
+      }
+
+      assert(!Queue.empty());
+      // Don't use a const reference.  The subsequent pop_front() might make
+      // it unsafe.
+      WorkListUnit U = Queue.front();
+      Queue.pop_front();
+      return U;
+    }
+    virtual bool visitItemsInWorkList(Visitor &V) {
+      for (SmallVectorImpl<WorkListUnit>::iterator
+           I = Stack.begin(), E = Stack.end(); I != E; ++I) {
+        if (V.visit(*I))
+          return true;
+      }
+      for (std::deque<WorkListUnit>::iterator
+           I = Queue.begin(), E = Queue.end(); I != E; ++I) {
+        if (V.visit(*I))
+          return true;
+      }
+      return false;
+    }
+
+  };
+} // end anonymous namespace
+
+WorkList* WorkList::makeBFSBlockDFSContents() {
+  return new BFSBlockDFSContents();
+}
+
+//===----------------------------------------------------------------------===//
+// Core analysis engine.
+//===----------------------------------------------------------------------===//
+
+/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
+bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
+                                   ProgramStateRef InitState) {
+
+  if (G->num_roots() == 0) { // Initialize the analysis by constructing
+    // the root if none exists.
+
+    const CFGBlock *Entry = &(L->getCFG()->getEntry());
+
+    assert (Entry->empty() &&
+            "Entry block must be empty.");
+
+    assert (Entry->succ_size() == 1 &&
+            "Entry block must have 1 successor.");
+
+    // Mark the entry block as visited.
+    FunctionSummaries->markVisitedBasicBlock(Entry->getBlockID(),
+                                             L->getDecl(),
+                                             L->getCFG()->getNumBlockIDs());
+
+    // Get the solitary successor.
+    const CFGBlock *Succ = *(Entry->succ_begin());
+
+    // Construct an edge representing the
+    // starting location in the function.
+    BlockEdge StartLoc(Entry, Succ, L);
+
+    // Set the current block counter to be empty.
+    WList->setBlockCounter(BCounterFactory.GetEmptyCounter());
+
+    if (!InitState)
+      // Generate the root.
+      generateNode(StartLoc, SubEng.getInitialState(L), 0);
+    else
+      generateNode(StartLoc, InitState, 0);
+  }
+
+  // Check if we have a steps limit (Steps == 0 means no limit).
+  bool UnlimitedSteps = Steps == 0;
+
+  while (WList->hasWork()) {
+    if (!UnlimitedSteps) {
+      if (Steps == 0) {
+        NumReachedMaxSteps++;
+        break;
+      }
+      --Steps;
+    }
+
+    NumSteps++;
+
+    const WorkListUnit& WU = WList->dequeue();
+
+    // Set the current block counter.
+    WList->setBlockCounter(WU.getBlockCounter());
+
+    // Retrieve the node.
+    ExplodedNode *Node = WU.getNode();
+
+    dispatchWorkItem(Node, Node->getLocation(), WU);
+  }
+  SubEng.processEndWorklist(hasWorkRemaining());
+  return WList->hasWork();
+}
+
+void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
+                                  const WorkListUnit& WU) {
+  // Dispatch on the location type.
+  switch (Loc.getKind()) {
+    case ProgramPoint::BlockEdgeKind:
+      HandleBlockEdge(Loc.castAs<BlockEdge>(), Pred);
+      break;
+
+    case ProgramPoint::BlockEntranceKind:
+      HandleBlockEntrance(Loc.castAs<BlockEntrance>(), Pred);
+      break;
+
+    case ProgramPoint::BlockExitKind:
+      assert (false && "BlockExit location never occur in forward analysis.");
+      break;
+
+    case ProgramPoint::CallEnterKind: {
+      CallEnter CEnter = Loc.castAs<CallEnter>();
+      SubEng.processCallEnter(CEnter, Pred);
+      break;
+    }
+
+    case ProgramPoint::CallExitBeginKind:
+      SubEng.processCallExit(Pred);
+      break;
+
+    case ProgramPoint::EpsilonKind: {
+      assert(Pred->hasSinglePred() &&
+             "Assume epsilon has exactly one predecessor by construction");
+      ExplodedNode *PNode = Pred->getFirstPred();
+      dispatchWorkItem(Pred, PNode->getLocation(), WU);
+      break;
+    }
+    default:
+      assert(Loc.getAs<PostStmt>() ||
+             Loc.getAs<PostInitializer>() ||
+             Loc.getAs<PostImplicitCall>() ||
+             Loc.getAs<CallExitEnd>());
+      HandlePostStmt(WU.getBlock(), WU.getIndex(), Pred);
+      break;
+  }
+}
+
+bool CoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L,
+                                                 unsigned Steps,
+                                                 ProgramStateRef InitState, 
+                                                 ExplodedNodeSet &Dst) {
+  bool DidNotFinish = ExecuteWorkList(L, Steps, InitState);
+  for (ExplodedGraph::eop_iterator I = G->eop_begin(), 
+                                   E = G->eop_end(); I != E; ++I) {
+    Dst.Add(*I);
+  }
+  return DidNotFinish;
+}
+
+void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
+
+  const CFGBlock *Blk = L.getDst();
+  NodeBuilderContext BuilderCtx(*this, Blk, Pred);
+
+  // Mark this block as visited.
+  const LocationContext *LC = Pred->getLocationContext();
+  FunctionSummaries->markVisitedBasicBlock(Blk->getBlockID(),
+                                           LC->getDecl(),
+                                           LC->getCFG()->getNumBlockIDs());
+
+  // Check if we are entering the EXIT block.
+  if (Blk == &(L.getLocationContext()->getCFG()->getExit())) {
+
+    assert (L.getLocationContext()->getCFG()->getExit().size() == 0
+            && "EXIT block cannot contain Stmts.");
+
+    // Process the final state transition.
+    SubEng.processEndOfFunction(BuilderCtx, Pred);
+
+    // This path is done. Don't enqueue any more nodes.
+    return;
+  }
+
+  // Call into the SubEngine to process entering the CFGBlock.
+  ExplodedNodeSet dstNodes;
+  BlockEntrance BE(Blk, Pred->getLocationContext());
+  NodeBuilderWithSinks nodeBuilder(Pred, dstNodes, BuilderCtx, BE);
+  SubEng.processCFGBlockEntrance(L, nodeBuilder, Pred);
+
+  // Auto-generate a node.
+  if (!nodeBuilder.hasGeneratedNodes()) {
+    nodeBuilder.generateNode(Pred->State, Pred);
+  }
+
+  // Enqueue nodes onto the worklist.
+  enqueue(dstNodes);
+}
+
+void CoreEngine::HandleBlockEntrance(const BlockEntrance &L,
+                                       ExplodedNode *Pred) {
+
+  // Increment the block counter.
+  const LocationContext *LC = Pred->getLocationContext();
+  unsigned BlockId = L.getBlock()->getBlockID();
+  BlockCounter Counter = WList->getBlockCounter();
+  Counter = BCounterFactory.IncrementCount(Counter, LC->getCurrentStackFrame(),
+                                           BlockId);
+  WList->setBlockCounter(Counter);
+
+  // Process the entrance of the block.
+  if (Optional<CFGElement> E = L.getFirstElement()) {
+    NodeBuilderContext Ctx(*this, L.getBlock(), Pred);
+    SubEng.processCFGElement(*E, Pred, 0, &Ctx);
+  }
+  else
+    HandleBlockExit(L.getBlock(), Pred);
+}
+
+void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
+
+  if (const Stmt *Term = B->getTerminator()) {
+    switch (Term->getStmtClass()) {
+      default:
+        llvm_unreachable("Analysis for this terminator not implemented.");
+
+      // Model static initializers.
+      case Stmt::DeclStmtClass:
+        HandleStaticInit(cast<DeclStmt>(Term), B, Pred);
+        return;
+
+      case Stmt::BinaryOperatorClass: // '&&' and '||'
+        HandleBranch(cast<BinaryOperator>(Term)->getLHS(), Term, B, Pred);
+        return;
+
+      case Stmt::BinaryConditionalOperatorClass:
+      case Stmt::ConditionalOperatorClass:
+        HandleBranch(cast<AbstractConditionalOperator>(Term)->getCond(),
+                     Term, B, Pred);
+        return;
+
+        // FIXME: Use constant-folding in CFG construction to simplify this
+        // case.
+
+      case Stmt::ChooseExprClass:
+        HandleBranch(cast<ChooseExpr>(Term)->getCond(), Term, B, Pred);
+        return;
+
+      case Stmt::CXXTryStmtClass: {
+        // Generate a node for each of the successors.
+        // Our logic for EH analysis can certainly be improved.
+        for (CFGBlock::const_succ_iterator it = B->succ_begin(),
+             et = B->succ_end(); it != et; ++it) {
+          if (const CFGBlock *succ = *it) {
+            generateNode(BlockEdge(B, succ, Pred->getLocationContext()),
+                         Pred->State, Pred);
+          }
+        }
+        return;
+      }
+        
+      case Stmt::DoStmtClass:
+        HandleBranch(cast<DoStmt>(Term)->getCond(), Term, B, Pred);
+        return;
+
+      case Stmt::CXXForRangeStmtClass:
+        HandleBranch(cast<CXXForRangeStmt>(Term)->getCond(), Term, B, Pred);
+        return;
+
+      case Stmt::ForStmtClass:
+        HandleBranch(cast<ForStmt>(Term)->getCond(), Term, B, Pred);
+        return;
+
+      case Stmt::ContinueStmtClass:
+      case Stmt::BreakStmtClass:
+      case Stmt::GotoStmtClass:
+        break;
+
+      case Stmt::IfStmtClass:
+        HandleBranch(cast<IfStmt>(Term)->getCond(), Term, B, Pred);
+        return;
+
+      case Stmt::IndirectGotoStmtClass: {
+        // Only 1 successor: the indirect goto dispatch block.
+        assert (B->succ_size() == 1);
+
+        IndirectGotoNodeBuilder
+           builder(Pred, B, cast<IndirectGotoStmt>(Term)->getTarget(),
+                   *(B->succ_begin()), this);
+
+        SubEng.processIndirectGoto(builder);
+        return;
+      }
+
+      case Stmt::ObjCForCollectionStmtClass: {
+        // In the case of ObjCForCollectionStmt, it appears twice in a CFG:
+        //
+        //  (1) inside a basic block, which represents the binding of the
+        //      'element' variable to a value.
+        //  (2) in a terminator, which represents the branch.
+        //
+        // For (1), subengines will bind a value (i.e., 0 or 1) indicating
+        // whether or not the collection contains any more elements.  We cannot
+        // just test to see if the element is nil because a container can
+        // contain nil elements.
+        HandleBranch(Term, Term, B, Pred);
+        return;
+      }
+
+      case Stmt::SwitchStmtClass: {
+        SwitchNodeBuilder builder(Pred, B, cast<SwitchStmt>(Term)->getCond(),
+                                    this);
+
+        SubEng.processSwitch(builder);
+        return;
+      }
+
+      case Stmt::WhileStmtClass:
+        HandleBranch(cast<WhileStmt>(Term)->getCond(), Term, B, Pred);
+        return;
+    }
+  }
+
+  assert (B->succ_size() == 1 &&
+          "Blocks with no terminator should have at most 1 successor.");
+
+  generateNode(BlockEdge(B, *(B->succ_begin()), Pred->getLocationContext()),
+               Pred->State, Pred);
+}
+
+void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term, 
+                                const CFGBlock * B, ExplodedNode *Pred) {
+  assert(B->succ_size() == 2);
+  NodeBuilderContext Ctx(*this, B, Pred);
+  ExplodedNodeSet Dst;
+  SubEng.processBranch(Cond, Term, Ctx, Pred, Dst,
+                       *(B->succ_begin()), *(B->succ_begin()+1));
+  // Enqueue the new frontier onto the worklist.
+  enqueue(Dst);
+}
+
+
+void CoreEngine::HandleStaticInit(const DeclStmt *DS, const CFGBlock *B,
+                                  ExplodedNode *Pred) {
+  assert(B->succ_size() == 2);
+  NodeBuilderContext Ctx(*this, B, Pred);
+  ExplodedNodeSet Dst;
+  SubEng.processStaticInitializer(DS, Ctx, Pred, Dst,
+                                  *(B->succ_begin()), *(B->succ_begin()+1));
+  // Enqueue the new frontier onto the worklist.
+  enqueue(Dst);
+}
+
+
+void CoreEngine::HandlePostStmt(const CFGBlock *B, unsigned StmtIdx, 
+                                  ExplodedNode *Pred) {
+  assert(B);
+  assert(!B->empty());
+
+  if (StmtIdx == B->size())
+    HandleBlockExit(B, Pred);
+  else {
+    NodeBuilderContext Ctx(*this, B, Pred);
+    SubEng.processCFGElement((*B)[StmtIdx], Pred, StmtIdx, &Ctx);
+  }
+}
+
+/// generateNode - Utility method to generate nodes, hook up successors,
+///  and add nodes to the worklist.
+void CoreEngine::generateNode(const ProgramPoint &Loc,
+                              ProgramStateRef State,
+                              ExplodedNode *Pred) {
+
+  bool IsNew;
+  ExplodedNode *Node = G->getNode(Loc, State, false, &IsNew);
+
+  if (Pred)
+    Node->addPredecessor(Pred, *G);  // Link 'Node' with its predecessor.
+  else {
+    assert (IsNew);
+    G->addRoot(Node);  // 'Node' has no predecessor.  Make it a root.
+  }
+
+  // Only add 'Node' to the worklist if it was freshly generated.
+  if (IsNew) WList->enqueue(Node);
+}
+
+void CoreEngine::enqueueStmtNode(ExplodedNode *N,
+                                 const CFGBlock *Block, unsigned Idx) {
+  assert(Block);
+  assert (!N->isSink());
+
+  // Check if this node entered a callee.
+  if (N->getLocation().getAs<CallEnter>()) {
+    // Still use the index of the CallExpr. It's needed to create the callee
+    // StackFrameContext.
+    WList->enqueue(N, Block, Idx);
+    return;
+  }
+
+  // Do not create extra nodes. Move to the next CFG element.
+  if (N->getLocation().getAs<PostInitializer>() ||
+      N->getLocation().getAs<PostImplicitCall>()) {
+    WList->enqueue(N, Block, Idx+1);
+    return;
+  }
+
+  if (N->getLocation().getAs<EpsilonPoint>()) {
+    WList->enqueue(N, Block, Idx);
+    return;
+  }
+
+  // At this point, we know we're processing a normal statement.
+  CFGStmt CS = (*Block)[Idx].castAs<CFGStmt>();
+  PostStmt Loc(CS.getStmt(), N->getLocationContext());
+
+  if (Loc == N->getLocation()) {
+    // Note: 'N' should be a fresh node because otherwise it shouldn't be
+    // a member of Deferred.
+    WList->enqueue(N, Block, Idx+1);
+    return;
+  }
+
+  bool IsNew;
+  ExplodedNode *Succ = G->getNode(Loc, N->getState(), false, &IsNew);
+  Succ->addPredecessor(N, *G);
+
+  if (IsNew)
+    WList->enqueue(Succ, Block, Idx+1);
+}
+
+ExplodedNode *CoreEngine::generateCallExitBeginNode(ExplodedNode *N) {
+  // Create a CallExitBegin node and enqueue it.
+  const StackFrameContext *LocCtx
+                         = cast<StackFrameContext>(N->getLocationContext());
+
+  // Use the callee location context.
+  CallExitBegin Loc(LocCtx);
+
+  bool isNew;
+  ExplodedNode *Node = G->getNode(Loc, N->getState(), false, &isNew);
+  Node->addPredecessor(N, *G);
+  return isNew ? Node : 0;
+}
+
+
+void CoreEngine::enqueue(ExplodedNodeSet &Set) {
+  for (ExplodedNodeSet::iterator I = Set.begin(),
+                                 E = Set.end(); I != E; ++I) {
+    WList->enqueue(*I);
+  }
+}
+
+void CoreEngine::enqueue(ExplodedNodeSet &Set,
+                         const CFGBlock *Block, unsigned Idx) {
+  for (ExplodedNodeSet::iterator I = Set.begin(),
+                                 E = Set.end(); I != E; ++I) {
+    enqueueStmtNode(*I, Block, Idx);
+  }
+}
+
+void CoreEngine::enqueueEndOfFunction(ExplodedNodeSet &Set) {
+  for (ExplodedNodeSet::iterator I = Set.begin(), E = Set.end(); I != E; ++I) {
+    ExplodedNode *N = *I;
+    // If we are in an inlined call, generate CallExitBegin node.
+    if (N->getLocationContext()->getParent()) {
+      N = generateCallExitBeginNode(N);
+      if (N)
+        WList->enqueue(N);
+    } else {
+      // TODO: We should run removeDeadBindings here.
+      G->addEndOfPath(N);
+      NumPathsExplored++;
+    }
+  }
+}
+
+
+void NodeBuilder::anchor() { }
+
+ExplodedNode* NodeBuilder::generateNodeImpl(const ProgramPoint &Loc,
+                                            ProgramStateRef State,
+                                            ExplodedNode *FromN,
+                                            bool MarkAsSink) {
+  HasGeneratedNodes = true;
+  bool IsNew;
+  ExplodedNode *N = C.Eng.G->getNode(Loc, State, MarkAsSink, &IsNew);
+  N->addPredecessor(FromN, *C.Eng.G);
+  Frontier.erase(FromN);
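+  // The predecessor leaves the frontier; the generated node takes its place
+  // below unless it already existed in the graph or is marked as a sink.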
+
+  if (!IsNew)
+    return 0;
+
+  if (!MarkAsSink)
+    Frontier.Add(N);
+
+  return N;
+}
+
+void NodeBuilderWithSinks::anchor() { }
+
+StmtNodeBuilder::~StmtNodeBuilder() {
+  if (EnclosingBldr)
+    for (ExplodedNodeSet::iterator I = Frontier.begin(),
+                                   E = Frontier.end(); I != E; ++I )
+      EnclosingBldr->addNodes(*I);
+}
+
+void BranchNodeBuilder::anchor() { }
+
+ExplodedNode *BranchNodeBuilder::generateNode(ProgramStateRef State,
+                                              bool branch,
+                                              ExplodedNode *NodePred) {
+  // If the branch has been marked infeasible we should not generate a node.
+  if (!isFeasible(branch))
+    return NULL;
+
+  ProgramPoint Loc = BlockEdge(C.Block, branch ? DstT:DstF,
+                               NodePred->getLocationContext());
+  ExplodedNode *Succ = generateNodeImpl(Loc, State, NodePred);
+  return Succ;
+}
+
+ExplodedNode*
+IndirectGotoNodeBuilder::generateNode(const iterator &I,
+                                      ProgramStateRef St,
+                                      bool IsSink) {
+  bool IsNew;
+  ExplodedNode *Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
+                                      Pred->getLocationContext()), St,
+                                      IsSink, &IsNew);
+  Succ->addPredecessor(Pred, *Eng.G);
+
+  if (!IsNew)
+    return 0;
+
+  if (!IsSink)
+    Eng.WList->enqueue(Succ);
+
+  return Succ;
+}
+
+
+ExplodedNode*
+SwitchNodeBuilder::generateCaseStmtNode(const iterator &I,
+                                        ProgramStateRef St) {
+
+  bool IsNew;
+  ExplodedNode *Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
+                                      Pred->getLocationContext()), St,
+                                      false, &IsNew);
+  Succ->addPredecessor(Pred, *Eng.G);
+  if (!IsNew)
+    return 0;
+
+  Eng.WList->enqueue(Succ);
+  return Succ;
+}
+
+
+ExplodedNode*
+SwitchNodeBuilder::generateDefaultCaseNode(ProgramStateRef St,
+                                           bool IsSink) {
+  // Get the block for the default case.
+  assert(Src->succ_rbegin() != Src->succ_rend());
+  CFGBlock *DefaultBlock = *Src->succ_rbegin();
+
+  // Sanity check for default blocks that are unreachable and not caught
+  // by earlier stages.
+  if (!DefaultBlock)
+    return NULL;
+  
+  bool IsNew;
+  ExplodedNode *Succ = Eng.G->getNode(BlockEdge(Src, DefaultBlock,
+                                      Pred->getLocationContext()), St,
+                                      IsSink, &IsNew);
+  Succ->addPredecessor(Pred, *Eng.G);
+
+  if (!IsNew)
+    return 0;
+
+  if (!IsSink)
+    Eng.WList->enqueue(Succ);
+
+  return Succ;
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
new file mode 100644
index 0000000..7b133f6
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
@@ -0,0 +1,214 @@
+//== Environment.cpp - Map from Stmt* to Locations/Values -------*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the Environment and EnvironmentManager classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static const Expr *ignoreTransparentExprs(const Expr *E) {
+  E = E->IgnoreParens();
+
+  switch (E->getStmtClass()) {
+  case Stmt::OpaqueValueExprClass:
+    E = cast<OpaqueValueExpr>(E)->getSourceExpr();
+    break;
+  case Stmt::ExprWithCleanupsClass:
+    E = cast<ExprWithCleanups>(E)->getSubExpr();
+    break;
+  case Stmt::CXXBindTemporaryExprClass:
+    E = cast<CXXBindTemporaryExpr>(E)->getSubExpr();
+    break;
+  case Stmt::SubstNonTypeTemplateParmExprClass:
+    E = cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement();
+    break;
+  default:
+    // This is the base case: we can't look through more than we already have.
+    return E;
+  }
+
+  return ignoreTransparentExprs(E);
+}
+
+static const Stmt *ignoreTransparentExprs(const Stmt *S) {
+  if (const Expr *E = dyn_cast<Expr>(S))
+    return ignoreTransparentExprs(E);
+  return S;
+}
+
+EnvironmentEntry::EnvironmentEntry(const Stmt *S, const LocationContext *L)
+  : std::pair<const Stmt *,
+              const StackFrameContext *>(ignoreTransparentExprs(S),
+                                         L ? L->getCurrentStackFrame() : 0) {}
+
+SVal Environment::lookupExpr(const EnvironmentEntry &E) const {
+  const SVal* X = ExprBindings.lookup(E);
+  if (X) {
+    SVal V = *X;
+    return V;
+  }
+  return UnknownVal();
+}
+
+SVal Environment::getSVal(const EnvironmentEntry &Entry,
+                          SValBuilder& svalBuilder) const {
+  const Stmt *S = Entry.getStmt();
+  const LocationContext *LCtx = Entry.getLocationContext();
+
+  switch (S->getStmtClass()) {
+  case Stmt::CXXBindTemporaryExprClass:
+  case Stmt::ExprWithCleanupsClass:
+  case Stmt::GenericSelectionExprClass:
+  case Stmt::OpaqueValueExprClass:
+  case Stmt::ParenExprClass:
+  case Stmt::SubstNonTypeTemplateParmExprClass:
+    llvm_unreachable("Should have been handled by ignoreTransparentExprs");
+
+  case Stmt::AddrLabelExprClass:
+  case Stmt::CharacterLiteralClass:
+  case Stmt::CXXBoolLiteralExprClass:
+  case Stmt::CXXScalarValueInitExprClass:
+  case Stmt::ImplicitValueInitExprClass:
+  case Stmt::IntegerLiteralClass:
+  case Stmt::ObjCBoolLiteralExprClass:
+  case Stmt::CXXNullPtrLiteralExprClass:
+  case Stmt::ObjCStringLiteralClass:
+  case Stmt::StringLiteralClass:
+    // Known constants; defer to SValBuilder.
+    return svalBuilder.getConstantVal(cast<Expr>(S)).getValue();
+
+  case Stmt::ReturnStmtClass: {
+    const ReturnStmt *RS = cast<ReturnStmt>(S);
+    if (const Expr *RE = RS->getRetValue())
+      return getSVal(EnvironmentEntry(RE, LCtx), svalBuilder);
+    return UndefinedVal();        
+  }
+    
+  // Handle all other Stmt* using a lookup.
+  default:
+    return lookupExpr(EnvironmentEntry(S, LCtx));
+  }
+}
+
+Environment EnvironmentManager::bindExpr(Environment Env,
+                                         const EnvironmentEntry &E,
+                                         SVal V,
+                                         bool Invalidate) {
+  if (V.isUnknown()) {
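+    // Unknown values are never stored in the environment; if Invalidate is
+    // set, any existing binding for E is dropped instead.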
+    if (Invalidate)
+      return Environment(F.remove(Env.ExprBindings, E));
+    else
+      return Env;
+  }
+  return Environment(F.add(Env.ExprBindings, E, V));
+}
+
+namespace {
+class MarkLiveCallback : public SymbolVisitor {
+  SymbolReaper &SymReaper;
+public:
+  MarkLiveCallback(SymbolReaper &symreaper) : SymReaper(symreaper) {}
+  bool VisitSymbol(SymbolRef sym) {
+    SymReaper.markLive(sym);
+    return true;
+  }
+  bool VisitMemRegion(const MemRegion *R) {
+    SymReaper.markLive(R);
+    return true;
+  }
+};
+} // end anonymous namespace
+
+// removeDeadBindings:
+//  - Remove subexpression bindings.
+//  - Remove dead block expression bindings.
+//  - Keep live block expression bindings:
+//   - Mark their reachable symbols live in SymbolReaper,
+//     see ScanReachableSymbols.
+//   - Mark the region in DRoots if the binding is a loc::MemRegionVal.
+Environment
+EnvironmentManager::removeDeadBindings(Environment Env,
+                                       SymbolReaper &SymReaper,
+                                       ProgramStateRef ST) {
+
+  // We construct a new Environment object entirely, as this is cheaper than
+  // individually removing all the subexpression bindings (which will greatly
+  // outnumber block-level expression bindings).
+  Environment NewEnv = getInitialEnvironment();
+
+  MarkLiveCallback CB(SymReaper);
+  ScanReachableSymbols RSScanner(ST, CB);
+
+  llvm::ImmutableMapRef<EnvironmentEntry,SVal>
+    EBMapRef(NewEnv.ExprBindings.getRootWithoutRetain(),
+             F.getTreeFactory());
+
+  // Iterate over the block-expr bindings.
+  for (Environment::iterator I = Env.begin(), E = Env.end();
+       I != E; ++I) {
+
+    const EnvironmentEntry &BlkExpr = I.getKey();
+    const SVal &X = I.getData();
+
+    if (SymReaper.isLive(BlkExpr.getStmt(), BlkExpr.getLocationContext())) {
+      // Copy the binding to the new map.
+      EBMapRef = EBMapRef.add(BlkExpr, X);
+
+      // If the block expr's value is a memory region, then mark that region.
+      if (Optional<loc::MemRegionVal> R = X.getAs<loc::MemRegionVal>())
+        SymReaper.markLive(R->getRegion());
+
+      // Mark all symbols in the block expr's value live.
+      RSScanner.scan(X);
+      continue;
+    } else {
+      SymExpr::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
+      for (; SI != SE; ++SI)
+        SymReaper.maybeDead(*SI);
+    }
+  }
+
+  NewEnv.ExprBindings = EBMapRef.asImmutableMap();
+  return NewEnv;
+}
+
+void Environment::print(raw_ostream &Out, const char *NL,
+                        const char *Sep) const {
+  bool isFirst = true;
+
+  for (Environment::iterator I = begin(), E = end(); I != E; ++I) {
+    const EnvironmentEntry &En = I.getKey();
+    
+    if (isFirst) {
+      Out << NL << NL
+          << "Expressions:"
+          << NL;      
+      isFirst = false;
+    } else {
+      Out << NL;
+    }
+    
+    const Stmt *S = En.getStmt();
+    
+    Out << " (" << (const void*) En.getLocationContext() << ','
+      << (const void*) S << ") ";
+    LangOptions LO; // FIXME.
+    S->printPretty(Out, 0, PrintingPolicy(LO));
+    Out << " : " << I.getData();
+  }
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
new file mode 100644
index 0000000..af9518a
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -0,0 +1,444 @@
+//=-- ExplodedGraph.cpp - Local, Path-Sens. "Exploded Graph" -*- C++ -*------=//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the template classes ExplodedNode and ExplodedGraph,
+//  which represent a path-sensitive, intra-procedural "exploded graph."
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/Stmt.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include <vector>
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Node auditing.
+//===----------------------------------------------------------------------===//
+
+// An out of line virtual method to provide a home for the class vtable.
+ExplodedNode::Auditor::~Auditor() {}
+
+#ifndef NDEBUG
+static ExplodedNode::Auditor* NodeAuditor = 0;
+#endif
+
+void ExplodedNode::SetAuditor(ExplodedNode::Auditor* A) {
+#ifndef NDEBUG
+  NodeAuditor = A;
+#endif
+}
+
+//===----------------------------------------------------------------------===//
+// Cleanup.
+//===----------------------------------------------------------------------===//
+
+ExplodedGraph::ExplodedGraph()
+  : NumNodes(0), ReclaimNodeInterval(0) {}
+
+ExplodedGraph::~ExplodedGraph() {}
+
+//===----------------------------------------------------------------------===//
+// Node reclamation.
+//===----------------------------------------------------------------------===//
+
+bool ExplodedGraph::isInterestingLValueExpr(const Expr *Ex) {
+  if (!Ex->isLValue())
+    return false;
+  return isa<DeclRefExpr>(Ex) ||
+         isa<MemberExpr>(Ex) ||
+         isa<ObjCIvarRefExpr>(Ex);
+}
+
+bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
+  // First, we only consider nodes for reclamation if the following
+  // conditions apply:
+  //
+  // (1) 1 predecessor (that has one successor)
+  // (2) 1 successor (that has one predecessor)
+  //
+  // If a node has no successor it is on the "frontier", while a node
+  // with no predecessor is a root.
+  //
+  // After these prerequisites, we discard all "filler" nodes that
+  // are used only for intermediate processing, and are not essential
+  // for analyzer history:
+  //
+  // (a) PreStmtPurgeDeadSymbols
+  //
+  // We then discard all other nodes where *all* of the following conditions
+  // apply:
+  //
+  // (3) The ProgramPoint is for a PostStmt, but not a PostStore.
+  // (4) There is no 'tag' for the ProgramPoint.
+  // (5) The 'store' is the same as the predecessor.
+  // (6) The 'GDM' is the same as the predecessor.
+  // (7) The LocationContext is the same as the predecessor.
+  // (8) Expressions that are *not* lvalue expressions.
+  // (9) The PostStmt isn't for a non-consumed Stmt or Expr.
+  // (10) The successor is not a CallExpr StmtPoint (so that we would
+  //      be able to find it when retrying a call with no inlining).
+  // FIXME: It may be safe to reclaim PreCall and PostCall nodes as well.
+
+  // Conditions 1 and 2.
+  if (node->pred_size() != 1 || node->succ_size() != 1)
+    return false;
+
+  const ExplodedNode *pred = *(node->pred_begin());
+  if (pred->succ_size() != 1)
+    return false;
+  
+  const ExplodedNode *succ = *(node->succ_begin());
+  if (succ->pred_size() != 1)
+    return false;
+
+  // Now reclaim any nodes that are (by definition) not essential to
+  // analysis history and are not consulted by any client code.
+  ProgramPoint progPoint = node->getLocation();
+  if (progPoint.getAs<PreStmtPurgeDeadSymbols>())
+    return !progPoint.getTag();
+
+  // Condition 3.
+  if (!progPoint.getAs<PostStmt>() || progPoint.getAs<PostStore>())
+    return false;
+
+  // Condition 4.
+  if (progPoint.getTag())
+    return false;
+
+  // Conditions 5, 6, and 7.
+  ProgramStateRef state = node->getState();
+  ProgramStateRef pred_state = pred->getState();    
+  if (state->store != pred_state->store || state->GDM != pred_state->GDM ||
+      progPoint.getLocationContext() != pred->getLocationContext())
+    return false;
+
+  // All further checks require expressions. As per #3, we know that we have
+  // a PostStmt.
+  const Expr *Ex = dyn_cast<Expr>(progPoint.castAs<PostStmt>().getStmt());
+  if (!Ex)
+    return false;
+
+  // Condition 8.
+  // Do not collect nodes for "interesting" lvalue expressions since they are
+  // used extensively for generating path diagnostics.
+  if (isInterestingLValueExpr(Ex))
+    return false;
+
+  // Condition 9.
+  // Do not collect nodes for non-consumed Stmt or Expr to ensure precise
+  // diagnostic generation; specifically, so that we could anchor arrows
+  // pointing to the beginning of statements (as written in code).
+  ParentMap &PM = progPoint.getLocationContext()->getParentMap();
+  if (!PM.isConsumedExpr(Ex))
+    return false;
+
+  // Condition 10.
+  const ProgramPoint SuccLoc = succ->getLocation();
+  if (Optional<StmtPoint> SP = SuccLoc.getAs<StmtPoint>())
+    if (CallEvent::isCallStmt(SP->getStmt()))
+      return false;
+
+  return true;
+}
+
+void ExplodedGraph::collectNode(ExplodedNode *node) {
+  // Removing a node means:
+  // (a) changing the predecessor's successor to the successor of this node
+  // (b) changing the successor's predecessor to the predecessor of this node
+  // (c) Putting 'node' onto freeNodes.
+  assert(node->pred_size() == 1 || node->succ_size() == 1);
+  ExplodedNode *pred = *(node->pred_begin());
+  ExplodedNode *succ = *(node->succ_begin());
+  pred->replaceSuccessor(succ);
+  succ->replacePredecessor(pred);
+  FreeNodes.push_back(node);
+  Nodes.RemoveNode(node);
+  --NumNodes;
+  node->~ExplodedNode();  
+}
+
+void ExplodedGraph::reclaimRecentlyAllocatedNodes() {
+  if (ChangedNodes.empty())
+    return;
+
+  // Only periodically reclaim nodes so that we can build up a set of
+  // nodes that meet the reclamation criteria.  Freshly created nodes
+  // by definition have no successor, and thus cannot be reclaimed (see below).
+  assert(ReclaimCounter > 0);
+  if (--ReclaimCounter != 0)
+    return;
+  ReclaimCounter = ReclaimNodeInterval;
+
+  for (NodeVector::iterator it = ChangedNodes.begin(), et = ChangedNodes.end();
+       it != et; ++it) {
+    ExplodedNode *node = *it;
+    if (shouldCollect(node))
+      collectNode(node);
+  }
+  ChangedNodes.clear();
+}
+
+//===----------------------------------------------------------------------===//
+// ExplodedNode.
+//===----------------------------------------------------------------------===//
+
+// A NodeGroup's storage type is actually very much like a TinyPtrVector:
+// it can be either a pointer to a single ExplodedNode, or a pointer to a
+// BumpVector allocated with the ExplodedGraph's allocator. This allows the
+// common case of single-node NodeGroups to be implemented with no extra memory.
+//
+// Consequently, each of the NodeGroup methods has up to four cases to handle:
+// 1. The flag is set and this group does not actually contain any nodes.
+// 2. The group is empty, in which case the storage value is null.
+// 3. The group contains a single node.
+// 4. The group contains more than one node.
+typedef BumpVector<ExplodedNode *> ExplodedNodeVector;
+typedef llvm::PointerUnion<ExplodedNode *, ExplodedNodeVector *> GroupStorage;
+
+void ExplodedNode::addPredecessor(ExplodedNode *V, ExplodedGraph &G) {
+  assert (!V->isSink());
+  Preds.addNode(V, G);
+  V->Succs.addNode(this, G);
+#ifndef NDEBUG
+  if (NodeAuditor) NodeAuditor->AddEdge(V, this);
+#endif
+}
+
+void ExplodedNode::NodeGroup::replaceNode(ExplodedNode *node) {
+  assert(!getFlag());
+
+  GroupStorage &Storage = reinterpret_cast<GroupStorage&>(P);
+  assert(Storage.is<ExplodedNode *>());
+  Storage = node;
+  assert(Storage.is<ExplodedNode *>());
+}
+
+void ExplodedNode::NodeGroup::addNode(ExplodedNode *N, ExplodedGraph &G) {
+  assert(!getFlag());
+
+  GroupStorage &Storage = reinterpret_cast<GroupStorage&>(P);
+  if (Storage.isNull()) {
+    Storage = N;
+    assert(Storage.is<ExplodedNode *>());
+    return;
+  }
+
+  ExplodedNodeVector *V = Storage.dyn_cast<ExplodedNodeVector *>();
+
+  if (!V) {
+    // Switch from single-node to multi-node representation.
+    ExplodedNode *Old = Storage.get<ExplodedNode *>();
+
+    BumpVectorContext &Ctx = G.getNodeAllocator();
+    V = G.getAllocator().Allocate<ExplodedNodeVector>();
+    new (V) ExplodedNodeVector(Ctx, 4);
+    V->push_back(Old, Ctx);
+
+    Storage = V;
+    assert(!getFlag());
+    assert(Storage.is<ExplodedNodeVector *>());
+  }
+
+  V->push_back(N, G.getNodeAllocator());
+}
+
+unsigned ExplodedNode::NodeGroup::size() const {
+  if (getFlag())
+    return 0;
+
+  const GroupStorage &Storage = reinterpret_cast<const GroupStorage &>(P);
+  if (Storage.isNull())
+    return 0;
+  if (ExplodedNodeVector *V = Storage.dyn_cast<ExplodedNodeVector *>())
+    return V->size();
+  return 1;
+}
+
+ExplodedNode * const *ExplodedNode::NodeGroup::begin() const {
+  if (getFlag())
+    return 0;
+
+  const GroupStorage &Storage = reinterpret_cast<const GroupStorage &>(P);
+  if (Storage.isNull())
+    return 0;
+  if (ExplodedNodeVector *V = Storage.dyn_cast<ExplodedNodeVector *>())
+    return V->begin();
+  return Storage.getAddrOfPtr1();
+}
+
+ExplodedNode * const *ExplodedNode::NodeGroup::end() const {
+  if (getFlag())
+    return 0;
+
+  const GroupStorage &Storage = reinterpret_cast<const GroupStorage &>(P);
+  if (Storage.isNull())
+    return 0;
+  if (ExplodedNodeVector *V = Storage.dyn_cast<ExplodedNodeVector *>())
+    return V->end();
+  return Storage.getAddrOfPtr1() + 1;
+}
+
+ExplodedNode *ExplodedGraph::getNode(const ProgramPoint &L,
+                                     ProgramStateRef State,
+                                     bool IsSink,
+                                     bool* IsNew) {
+  // Profile 'State' to determine if we already have an existing node.
+  llvm::FoldingSetNodeID profile;
+  void *InsertPos = 0;
+
+  NodeTy::Profile(profile, L, State, IsSink);
+  NodeTy* V = Nodes.FindNodeOrInsertPos(profile, InsertPos);
+
+  if (!V) {
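+    // Prefer recycling a node previously reclaimed by
+    // reclaimRecentlyAllocatedNodes() over allocating a fresh one.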
+    if (!FreeNodes.empty()) {
+      V = FreeNodes.back();
+      FreeNodes.pop_back();
+    }
+    else {
+      // Allocate a new node.
+      V = (NodeTy*) getAllocator().Allocate<NodeTy>();
+    }
+
+    new (V) NodeTy(L, State, IsSink);
+
+    if (ReclaimNodeInterval)
+      ChangedNodes.push_back(V);
+
+    // Insert the node into the node set and return it.
+    Nodes.InsertNode(V, InsertPos);
+    ++NumNodes;
+
+    if (IsNew) *IsNew = true;
+  }
+  else
+    if (IsNew) *IsNew = false;
+
+  return V;
+}
+
+ExplodedGraph *
+ExplodedGraph::trim(ArrayRef<const NodeTy *> Sinks,
+                    InterExplodedGraphMap *ForwardMap,
+                    InterExplodedGraphMap *InverseMap) const{
+
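+  // Overview: pass 1 walks backwards from the given sink nodes, marking every
+  // node that can reach them and collecting the roots of those paths; pass 2
+  // walks forward from the collected roots, copying only the marked nodes
+  // into a fresh graph and recording the old-to-new node mapping.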
+  if (Nodes.empty())
+    return 0;
+
+  typedef llvm::DenseSet<const ExplodedNode*> Pass1Ty;
+  Pass1Ty Pass1;
+
+  typedef InterExplodedGraphMap Pass2Ty;
+  InterExplodedGraphMap Pass2Scratch;
+  Pass2Ty &Pass2 = ForwardMap ? *ForwardMap : Pass2Scratch;
+
+  SmallVector<const ExplodedNode*, 10> WL1, WL2;
+
+  // ===- Pass 1 (reverse DFS) -===
+  for (ArrayRef<const NodeTy *>::iterator I = Sinks.begin(), E = Sinks.end();
+       I != E; ++I) {
+    if (*I)
+      WL1.push_back(*I);
+  }
+
+  // Process the first worklist until it is empty.
+  while (!WL1.empty()) {
+    const ExplodedNode *N = WL1.back();
+    WL1.pop_back();
+
+    // Have we already visited this node?  If so, continue to the next one.
+    if (Pass1.count(N))
+      continue;
+
+    // Otherwise, mark this node as visited.
+    Pass1.insert(N);
+
+    // If this is a root enqueue it to the second worklist.
+    if (N->Preds.empty()) {
+      WL2.push_back(N);
+      continue;
+    }
+
+    // Visit our predecessors and enqueue them.
+    for (ExplodedNode::pred_iterator I = N->Preds.begin(), E = N->Preds.end();
+         I != E; ++I)
+      WL1.push_back(*I);
+  }
+
+  // We didn't hit a root? Return with a null pointer for the new graph.
+  if (WL2.empty())
+    return 0;
+
+  // Create an empty graph.
+  ExplodedGraph* G = MakeEmptyGraph();
+
+  // ===- Pass 2 (forward DFS to construct the new graph) -===
+  while (!WL2.empty()) {
+    const ExplodedNode *N = WL2.back();
+    WL2.pop_back();
+
+    // Skip this node if we have already processed it.
+    if (Pass2.find(N) != Pass2.end())
+      continue;
+
+    // Create the corresponding node in the new graph and record the mapping
+    // from the old node to the new node.
+    ExplodedNode *NewN = G->getNode(N->getLocation(), N->State, N->isSink(), 0);
+    Pass2[N] = NewN;
+
+    // Also record the reverse mapping from the new node to the old node.
+    if (InverseMap) (*InverseMap)[NewN] = N;
+
+    // If this node is a root, designate it as such in the graph.
+    if (N->Preds.empty())
+      G->addRoot(NewN);
+
+    // In the case that some of the intended predecessors of NewN have already
+    // been created, we should hook them up as predecessors.
+
+    // Walk through the predecessors of 'N' and hook up their corresponding
+    // nodes in the new graph (if any) to the freshly created node.
+    for (ExplodedNode::pred_iterator I = N->Preds.begin(), E = N->Preds.end();
+         I != E; ++I) {
+      Pass2Ty::iterator PI = Pass2.find(*I);
+      if (PI == Pass2.end())
+        continue;
+
+      NewN->addPredecessor(const_cast<ExplodedNode *>(PI->second), *G);
+    }
+
+    // In the case that some of the intended successors of NewN have already
+    // been created, we should hook them up as successors.  Otherwise, enqueue
+    // the new nodes from the original graph that should have nodes created
+    // in the new graph.
+    for (ExplodedNode::succ_iterator I = N->Succs.begin(), E = N->Succs.end();
+         I != E; ++I) {
+      Pass2Ty::iterator PI = Pass2.find(*I);
+      if (PI != Pass2.end()) {
+        const_cast<ExplodedNode *>(PI->second)->addPredecessor(NewN, *G);
+        continue;
+      }
+
+      // Enqueue nodes to the worklist that were marked during pass 1.
+      if (Pass1.count(*I))
+        WL2.push_back(*I);
+    }
+  }
+
+  return G;
+}
+
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
new file mode 100644
index 0000000..bfe4e15
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -0,0 +1,2453 @@
+//=-- ExprEngine.cpp - Path-Sensitive Expression-Level Dataflow ---*- C++ -*-=
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines a meta-engine for path-sensitive dataflow analysis that
+//  is built on GREngine, but provides the boilerplate to execute transfer
+//  functions and build the ExplodedGraph at the expression level.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "ExprEngine"
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/raw_ostream.h"
+
+#ifndef NDEBUG
+#include "llvm/Support/GraphWriter.h"
+#endif
+
+using namespace clang;
+using namespace ento;
+using llvm::APSInt;
+
+STATISTIC(NumRemoveDeadBindings,
+            "The # of times RemoveDeadBindings is called");
+STATISTIC(NumMaxBlockCountReached,
+            "The # of aborted paths due to reaching the maximum block count in "
+            "a top level function");
+STATISTIC(NumMaxBlockCountReachedInInlined,
+            "The # of aborted paths due to reaching the maximum block count in "
+            "an inlined function");
+STATISTIC(NumTimesRetriedWithoutInlining,
+            "The # of times we re-evaluated a call without inlining");
+
+//===----------------------------------------------------------------------===//
+// Engine construction and deletion.
+//===----------------------------------------------------------------------===//
+
+ExprEngine::ExprEngine(AnalysisManager &mgr, bool gcEnabled,
+                       SetOfConstDecls *VisitedCalleesIn,
+                       FunctionSummariesTy *FS,
+                       InliningModes HowToInlineIn)
+  : AMgr(mgr),
+    AnalysisDeclContexts(mgr.getAnalysisDeclContextManager()),
+    Engine(*this, FS),
+    G(Engine.getGraph()),
+    StateMgr(getContext(), mgr.getStoreManagerCreator(),
+             mgr.getConstraintManagerCreator(), G.getAllocator(),
+             this),
+    SymMgr(StateMgr.getSymbolManager()),
+    svalBuilder(StateMgr.getSValBuilder()),
+    currStmtIdx(0), currBldrCtx(0),
+    ObjCNoRet(mgr.getASTContext()),
+    ObjCGCEnabled(gcEnabled), BR(mgr, *this),
+    VisitedCallees(VisitedCalleesIn),
+    HowToInline(HowToInlineIn)
+{
+  unsigned TrimInterval = mgr.options.getGraphTrimInterval();
+  if (TrimInterval != 0) {
+    // Enable eager node reclamation when constructing the ExplodedGraph.
+    G.enableNodeReclamation(TrimInterval);
+  }
+}
+
+ExprEngine::~ExprEngine() {
+  BR.FlushReports();
+}
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
+  ProgramStateRef state = StateMgr.getInitialState(InitLoc);
+  const Decl *D = InitLoc->getDecl();
+
+  // Preconditions.
+  // FIXME: It would be nice if we had a more general mechanism to add
+  // such preconditions.  Some day.
+  do {
+
+    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+      // Precondition: the first argument of 'main' is an integer guaranteed
+      //  to be > 0.
+      const IdentifierInfo *II = FD->getIdentifier();
+      if (!II || !(II->getName() == "main" && FD->getNumParams() > 0))
+        break;
+
+      const ParmVarDecl *PD = FD->getParamDecl(0);
+      QualType T = PD->getType();
+      const BuiltinType *BT = dyn_cast<BuiltinType>(T);
+      if (!BT || !BT->isInteger())
+        break;
+
+      const MemRegion *R = state->getRegion(PD, InitLoc);
+      if (!R)
+        break;
+
+      SVal V = state->getSVal(loc::MemRegionVal(R));
+      SVal Constraint_untested = evalBinOp(state, BO_GT, V,
+                                           svalBuilder.makeZeroVal(T),
+                                           getContext().IntTy);
+
+      Optional<DefinedOrUnknownSVal> Constraint =
+          Constraint_untested.getAs<DefinedOrUnknownSVal>();
+
+      if (!Constraint)
+        break;
+
+      if (ProgramStateRef newState = state->assume(*Constraint, true))
+        state = newState;
+    }
+    break;
+  }
+  while (0);
+
+  if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+    // Precondition: 'self' is always non-null upon entry to an Objective-C
+    // method.
+    const ImplicitParamDecl *SelfD = MD->getSelfDecl();
+    const MemRegion *R = state->getRegion(SelfD, InitLoc);
+    SVal V = state->getSVal(loc::MemRegionVal(R));
+
+    if (Optional<Loc> LV = V.getAs<Loc>()) {
+      // Assume that the pointer value in 'self' is non-null.
+      state = state->assume(*LV, true);
+      assert(state && "'self' cannot be null");
+    }
+  }
+
+  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+    if (!MD->isStatic()) {
+      // Precondition: 'this' is always non-null upon entry to the
+      // top-level function.  This is our starting assumption for
+      // analyzing an "open" program.
+      const StackFrameContext *SFC = InitLoc->getCurrentStackFrame();
+      if (SFC->getParent() == 0) {
+        loc::MemRegionVal L = svalBuilder.getCXXThis(MD, SFC);
+        SVal V = state->getSVal(L);
+        if (Optional<Loc> LV = V.getAs<Loc>()) {
+          state = state->assume(*LV, true);
+          assert(state && "'this' cannot be null");
+        }
+      }
+    }
+  }
+    
+  return state;
+}
+
+ProgramStateRef
+ExprEngine::createTemporaryRegionIfNeeded(ProgramStateRef State,
+                                          const LocationContext *LC,
+                                          const Expr *Ex,
+                                          const Expr *Result) {
+  SVal V = State->getSVal(Ex, LC);
+  if (!Result) {
+    // If we don't have an explicit result expression, we're in "if needed"
+    // mode. Only create a region if the current value is a NonLoc.
+    if (!V.getAs<NonLoc>())
+      return State;
+    Result = Ex;
+  } else {
+    // We need to create a region no matter what. For sanity, make sure we don't
+    // try to stuff a Loc into a non-pointer temporary region.
+    assert(!V.getAs<Loc>() || Loc::isLocType(Result->getType()) ||
+           Result->getType()->isMemberPointerType());
+  }
+
+  ProgramStateManager &StateMgr = State->getStateManager();
+  MemRegionManager &MRMgr = StateMgr.getRegionManager();
+  StoreManager &StoreMgr = StateMgr.getStoreManager();
+
+  // We need to be careful about treating a derived type's value as
+  // bindings for a base type. Unless we're creating a temporary pointer region,
+  // start by stripping and recording base casts.
+  SmallVector<const CastExpr *, 4> Casts;
+  const Expr *Inner = Ex->IgnoreParens();
+  if (!Loc::isLocType(Result->getType())) {
+    while (const CastExpr *CE = dyn_cast<CastExpr>(Inner)) {
+      if (CE->getCastKind() == CK_DerivedToBase ||
+          CE->getCastKind() == CK_UncheckedDerivedToBase)
+        Casts.push_back(CE);
+      else if (CE->getCastKind() != CK_NoOp)
+        break;
+
+      Inner = CE->getSubExpr()->IgnoreParens();
+    }
+  }
+
+  // Create a temporary object region for the inner expression (which may have
+  // a more derived type) and bind the value into it.
+  const TypedValueRegion *TR = MRMgr.getCXXTempObjectRegion(Inner, LC);
+  SVal Reg = loc::MemRegionVal(TR);
+
+  if (V.isUnknown())
+    V = getSValBuilder().conjureSymbolVal(Result, LC, TR->getValueType(),
+                                          currBldrCtx->blockCount());
+  State = State->bindLoc(Reg, V);
+
+  // Re-apply the casts (from innermost to outermost) for type sanity.
+  for (SmallVectorImpl<const CastExpr *>::reverse_iterator I = Casts.rbegin(),
+                                                           E = Casts.rend();
+       I != E; ++I) {
+    Reg = StoreMgr.evalDerivedToBase(Reg, *I);
+  }
+
+  State = State->BindExpr(Result, LC, Reg);
+  return State;
+}
+
+//===----------------------------------------------------------------------===//
+// Top-level transfer function logic (Dispatcher).
+//===----------------------------------------------------------------------===//
+
+/// evalAssume - Called by ConstraintManager. Used to call checker-specific
+///  logic for handling assumptions on symbolic values.
+ProgramStateRef ExprEngine::processAssume(ProgramStateRef state,
+                                              SVal cond, bool assumption) {
+  return getCheckerManager().runCheckersForEvalAssume(state, cond, assumption);
+}
+
+bool ExprEngine::wantsRegionChangeUpdate(ProgramStateRef state) {
+  return getCheckerManager().wantsRegionChangeUpdate(state);
+}
+
+ProgramStateRef 
+ExprEngine::processRegionChanges(ProgramStateRef state,
+                                 const InvalidatedSymbols *invalidated,
+                                 ArrayRef<const MemRegion *> Explicits,
+                                 ArrayRef<const MemRegion *> Regions,
+                                 const CallEvent *Call) {
+  return getCheckerManager().runCheckersForRegionChanges(state, invalidated,
+                                                      Explicits, Regions, Call);
+}
+
+void ExprEngine::printState(raw_ostream &Out, ProgramStateRef State,
+                            const char *NL, const char *Sep) {
+  getCheckerManager().runCheckersForPrintState(Out, State, NL, Sep);
+}
+
+void ExprEngine::processEndWorklist(bool hasWorkRemaining) {
+  getCheckerManager().runCheckersForEndAnalysis(G, BR, *this);
+}
+
+void ExprEngine::processCFGElement(const CFGElement E, ExplodedNode *Pred,
+                                   unsigned StmtIdx, NodeBuilderContext *Ctx) {
+  currStmtIdx = StmtIdx;
+  currBldrCtx = Ctx;
+
+  switch (E.getKind()) {
+    case CFGElement::Statement:
+      ProcessStmt(const_cast<Stmt*>(E.castAs<CFGStmt>().getStmt()), Pred);
+      return;
+    case CFGElement::Initializer:
+      ProcessInitializer(E.castAs<CFGInitializer>().getInitializer(), Pred);
+      return;
+    case CFGElement::AutomaticObjectDtor:
+    case CFGElement::BaseDtor:
+    case CFGElement::MemberDtor:
+    case CFGElement::TemporaryDtor:
+      ProcessImplicitDtor(E.castAs<CFGImplicitDtor>(), Pred);
+      return;
+  }
+  currBldrCtx = 0;
+}
+
+static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
+                                     const CFGStmt S,
+                                     const ExplodedNode *Pred,
+                                     const LocationContext *LC) {
+  
+  // Are we never purging state values?
+  if (AMgr.options.AnalysisPurgeOpt == PurgeNone)
+    return false;
+
+  // Is this the beginning of a basic block?
+  if (Pred->getLocation().getAs<BlockEntrance>())
+    return true;
+
+  // Is this on a non-expression?
+  if (!isa<Expr>(S.getStmt()))
+    return true;
+    
+  // Run before processing a call.
+  if (CallEvent::isCallStmt(S.getStmt()))
+    return true;
+
+  // Is this an expression that is consumed by another expression?  If so,
+  // postpone cleaning out the state.
+  ParentMap &PM = LC->getAnalysisDeclContext()->getParentMap();
+  return !PM.isConsumedExpr(cast<Expr>(S.getStmt()));
+}
+
+void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out,
+                            const Stmt *ReferenceStmt,
+                            const LocationContext *LC,
+                            const Stmt *DiagnosticStmt,
+                            ProgramPoint::Kind K) {
+  assert((K == ProgramPoint::PreStmtPurgeDeadSymbolsKind ||
+          ReferenceStmt == 0 || isa<ReturnStmt>(ReferenceStmt))
+          && "PostStmt is not generally supported by the SymbolReaper yet");
+  assert(LC && "Must pass the current (or expiring) LocationContext");
+
+  if (!DiagnosticStmt) {
+    DiagnosticStmt = ReferenceStmt;
+    assert(DiagnosticStmt && "Required for clearing a LocationContext");
+  }
+
+  NumRemoveDeadBindings++;
+  ProgramStateRef CleanedState = Pred->getState();
+
+  // LC is the location context being destroyed, but SymbolReaper wants a
+  // location context that is still live. (If this is the top-level stack
+  // frame, this will be null.)
+  if (!ReferenceStmt) {
+    assert(K == ProgramPoint::PostStmtPurgeDeadSymbolsKind &&
+           "Use PostStmtPurgeDeadSymbolsKind for clearing a LocationContext");
+    LC = LC->getParent();
+  }
+
+  const StackFrameContext *SFC = LC ? LC->getCurrentStackFrame() : 0;
+  SymbolReaper SymReaper(SFC, ReferenceStmt, SymMgr, getStoreManager());
+
+  getCheckerManager().runCheckersForLiveSymbols(CleanedState, SymReaper);
+
+  // Create a state in which dead bindings are removed from the environment
+  // and the store. TODO: The function should just return new env and store,
+  // not a new state.
+  CleanedState = StateMgr.removeDeadBindings(CleanedState, SFC, SymReaper);
+
+  // Process any special transfer function for dead symbols.
+  // A tag to track convenience transitions, which can be removed at cleanup.
+  static SimpleProgramPointTag cleanupTag("ExprEngine : Clean Node");
+  if (!SymReaper.hasDeadSymbols()) {
+    // Generate a CleanedNode that has the environment and store cleaned
+    // up. Since no symbols are dead, we can optimize and not clean out
+    // the constraint manager.
+    StmtNodeBuilder Bldr(Pred, Out, *currBldrCtx);
+    Bldr.generateNode(DiagnosticStmt, Pred, CleanedState, &cleanupTag, K);
+
+  } else {
+    // Call checkers with the non-cleaned state so that they could query the
+    // values of the soon to be dead symbols.
+    ExplodedNodeSet CheckedSet;
+    getCheckerManager().runCheckersForDeadSymbols(CheckedSet, Pred, SymReaper,
+                                                  DiagnosticStmt, *this, K);
+
+    // For each node in CheckedSet, generate CleanedNodes that have the
+    // environment, the store, and the constraints cleaned up but have the
+    // user-supplied states as the predecessors.
+    StmtNodeBuilder Bldr(CheckedSet, Out, *currBldrCtx);
+    for (ExplodedNodeSet::const_iterator
+          I = CheckedSet.begin(), E = CheckedSet.end(); I != E; ++I) {
+      ProgramStateRef CheckerState = (*I)->getState();
+
+      // The constraint manager has not been cleaned up yet, so clean up now.
+      CheckerState = getConstraintManager().removeDeadBindings(CheckerState,
+                                                               SymReaper);
+
+      assert(StateMgr.haveEqualEnvironments(CheckerState, Pred->getState()) &&
+        "Checkers are not allowed to modify the Environment as a part of "
+        "checkDeadSymbols processing.");
+      assert(StateMgr.haveEqualStores(CheckerState, Pred->getState()) &&
+        "Checkers are not allowed to modify the Store as a part of "
+        "checkDeadSymbols processing.");
+
+      // Create a state based on CleanedState with CheckerState GDM and
+      // generate a transition to that state.
+      ProgramStateRef CleanedCheckerSt =
+        StateMgr.getPersistentStateWithGDM(CleanedState, CheckerState);
+      Bldr.generateNode(DiagnosticStmt, *I, CleanedCheckerSt, &cleanupTag, K);
+    }
+  }
+}
+
+void ExprEngine::ProcessStmt(const CFGStmt S,
+                             ExplodedNode *Pred) {
+  // Reclaim any unnecessary nodes in the ExplodedGraph.
+  G.reclaimRecentlyAllocatedNodes();
+
+  const Stmt *currStmt = S.getStmt();
+  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+                                currStmt->getLocStart(),
+                                "Error evaluating statement");
+
+  // Remove dead bindings and symbols.
+  ExplodedNodeSet CleanedStates;
+  if (shouldRemoveDeadBindings(AMgr, S, Pred, Pred->getLocationContext())){
+    removeDead(Pred, CleanedStates, currStmt, Pred->getLocationContext());
+  } else
+    CleanedStates.Add(Pred);
+
+  // Visit the statement.
+  ExplodedNodeSet Dst;
+  for (ExplodedNodeSet::iterator I = CleanedStates.begin(),
+                                 E = CleanedStates.end(); I != E; ++I) {
+    ExplodedNodeSet DstI;
+    // Visit the statement.
+    Visit(currStmt, *I, DstI);
+    Dst.insert(DstI);
+  }
+
+  // Enqueue the new nodes onto the work list.
+  Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
+}
+
+void ExprEngine::ProcessInitializer(const CFGInitializer Init,
+                                    ExplodedNode *Pred) {
+  const CXXCtorInitializer *BMI = Init.getInitializer();
+
+  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+                                BMI->getSourceLocation(),
+                                "Error evaluating initializer");
+
+  // We don't clean up dead bindings here.
+  const StackFrameContext *stackFrame =
+                           cast<StackFrameContext>(Pred->getLocationContext());
+  const CXXConstructorDecl *decl =
+                           cast<CXXConstructorDecl>(stackFrame->getDecl());
+
+  ProgramStateRef State = Pred->getState();
+  SVal thisVal = State->getSVal(svalBuilder.getCXXThis(decl, stackFrame));
+
+  ExplodedNodeSet Tmp(Pred);
+  SVal FieldLoc;
+
+  // Evaluate the initializer, if necessary
+  if (BMI->isAnyMemberInitializer()) {
+    // Constructors build the object directly in the field,
+    // but non-objects must be copied in from the initializer.
+    const Expr *Init = BMI->getInit()->IgnoreImplicit();
+    if (!isa<CXXConstructExpr>(Init)) {
+      const ValueDecl *Field;
+      if (BMI->isIndirectMemberInitializer()) {
+        Field = BMI->getIndirectMember();
+        FieldLoc = State->getLValue(BMI->getIndirectMember(), thisVal);
+      } else {
+        Field = BMI->getMember();
+        FieldLoc = State->getLValue(BMI->getMember(), thisVal);
+      }
+
+      SVal InitVal;
+      if (BMI->getNumArrayIndices() > 0) {
+        // Handle arrays of trivial type. We can represent this with a
+        // primitive load/copy from the base array region.
+        const ArraySubscriptExpr *ASE;
+        while ((ASE = dyn_cast<ArraySubscriptExpr>(Init)))
+          Init = ASE->getBase()->IgnoreImplicit();
+
+        SVal LValue = State->getSVal(Init, stackFrame);
+        if (Optional<Loc> LValueLoc = LValue.getAs<Loc>())
+          InitVal = State->getSVal(*LValueLoc);
+
+        // If we fail to get the value for some reason, use a symbolic value.
+        if (InitVal.isUnknownOrUndef()) {
+          SValBuilder &SVB = getSValBuilder();
+          InitVal = SVB.conjureSymbolVal(BMI->getInit(), stackFrame,
+                                         Field->getType(),
+                                         currBldrCtx->blockCount());
+        }
+      } else {
+        InitVal = State->getSVal(BMI->getInit(), stackFrame);
+      }
+
+      assert(Tmp.size() == 1 && "have not generated any new nodes yet");
+      assert(*Tmp.begin() == Pred && "have not generated any new nodes yet");
+      Tmp.clear();
+      
+      PostInitializer PP(BMI, FieldLoc.getAsRegion(), stackFrame);
+      evalBind(Tmp, Init, Pred, FieldLoc, InitVal, /*isInit=*/true, &PP);
+    }
+  } else {
+    assert(BMI->isBaseInitializer() || BMI->isDelegatingInitializer());
+    // We already did all the work when visiting the CXXConstructExpr.
+  }
+
+  // Construct PostInitializer nodes whether the state changed or not,
+  // so that the diagnostics don't get confused.
+  PostInitializer PP(BMI, FieldLoc.getAsRegion(), stackFrame);
+  ExplodedNodeSet Dst;
+  NodeBuilder Bldr(Tmp, Dst, *currBldrCtx);
+  for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) {
+    ExplodedNode *N = *I;
+    Bldr.generateNode(PP, N->getState(), N);
+  }
+
+  // Enqueue the new nodes onto the work list.
+  Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
+}
+
+void ExprEngine::ProcessImplicitDtor(const CFGImplicitDtor D,
+                                     ExplodedNode *Pred) {
+  ExplodedNodeSet Dst;
+  switch (D.getKind()) {
+  case CFGElement::AutomaticObjectDtor:
+    ProcessAutomaticObjDtor(D.castAs<CFGAutomaticObjDtor>(), Pred, Dst);
+    break;
+  case CFGElement::BaseDtor:
+    ProcessBaseDtor(D.castAs<CFGBaseDtor>(), Pred, Dst);
+    break;
+  case CFGElement::MemberDtor:
+    ProcessMemberDtor(D.castAs<CFGMemberDtor>(), Pred, Dst);
+    break;
+  case CFGElement::TemporaryDtor:
+    ProcessTemporaryDtor(D.castAs<CFGTemporaryDtor>(), Pred, Dst);
+    break;
+  default:
+    llvm_unreachable("Unexpected dtor kind.");
+  }
+
+  // Enqueue the new nodes onto the work list.
+  Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx);
+}
+
+void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
+                                         ExplodedNode *Pred,
+                                         ExplodedNodeSet &Dst) {
+  const VarDecl *varDecl = Dtor.getVarDecl();
+  QualType varType = varDecl->getType();
+
+  ProgramStateRef state = Pred->getState();
+  SVal dest = state->getLValue(varDecl, Pred->getLocationContext());
+  const MemRegion *Region = dest.castAs<loc::MemRegionVal>().getRegion();
+
+  if (const ReferenceType *refType = varType->getAs<ReferenceType>()) {
+    varType = refType->getPointeeType();
+    Region = state->getSVal(Region).getAsRegion();
+  }
+
+  VisitCXXDestructor(varType, Region, Dtor.getTriggerStmt(), /*IsBase=*/ false,
+                     Pred, Dst);
+}
+
+void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D,
+                                 ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+  const LocationContext *LCtx = Pred->getLocationContext();
+  ProgramStateRef State = Pred->getState();
+
+  const CXXDestructorDecl *CurDtor = cast<CXXDestructorDecl>(LCtx->getDecl());
+  Loc ThisPtr = getSValBuilder().getCXXThis(CurDtor,
+                                            LCtx->getCurrentStackFrame());
+  SVal ThisVal = Pred->getState()->getSVal(ThisPtr);
+
+  // Create the base object region.
+  const CXXBaseSpecifier *Base = D.getBaseSpecifier();
+  QualType BaseTy = Base->getType();
+  SVal BaseVal = getStoreManager().evalDerivedToBase(ThisVal, BaseTy,
+                                                     Base->isVirtual());
+
+  VisitCXXDestructor(BaseTy, BaseVal.castAs<loc::MemRegionVal>().getRegion(),
+                     CurDtor->getBody(), /*IsBase=*/ true, Pred, Dst);
+}
+
+void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D,
+                                   ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+  const FieldDecl *Member = D.getFieldDecl();
+  ProgramStateRef State = Pred->getState();
+  const LocationContext *LCtx = Pred->getLocationContext();
+
+  const CXXDestructorDecl *CurDtor = cast<CXXDestructorDecl>(LCtx->getDecl());
+  Loc ThisVal = getSValBuilder().getCXXThis(CurDtor,
+                                            LCtx->getCurrentStackFrame());
+  SVal FieldVal =
+      State->getLValue(Member, State->getSVal(ThisVal).castAs<Loc>());
+
+  VisitCXXDestructor(Member->getType(),
+                     FieldVal.castAs<loc::MemRegionVal>().getRegion(),
+                     CurDtor->getBody(), /*IsBase=*/false, Pred, Dst);
+}
+
+void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
+                                      ExplodedNode *Pred,
+                                      ExplodedNodeSet &Dst) {}
+
+void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
+                       ExplodedNodeSet &DstTop) {
+  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+                                S->getLocStart(),
+                                "Error evaluating statement");
+  ExplodedNodeSet Dst;
+  StmtNodeBuilder Bldr(Pred, DstTop, *currBldrCtx);
+
+  assert(!isa<Expr>(S) || S == cast<Expr>(S)->IgnoreParens());
+
+  switch (S->getStmtClass()) {
+    // C++ and ARC stuff we don't support yet.
+    case Expr::ObjCIndirectCopyRestoreExprClass:
+    case Stmt::CXXDefaultInitExprClass:
+    case Stmt::CXXDependentScopeMemberExprClass:
+    case Stmt::CXXPseudoDestructorExprClass:
+    case Stmt::CXXTryStmtClass:
+    case Stmt::CXXTypeidExprClass:
+    case Stmt::CXXUuidofExprClass:
+    case Stmt::MSPropertyRefExprClass:
+    case Stmt::CXXUnresolvedConstructExprClass:
+    case Stmt::DependentScopeDeclRefExprClass:
+    case Stmt::UnaryTypeTraitExprClass:
+    case Stmt::BinaryTypeTraitExprClass:
+    case Stmt::TypeTraitExprClass:
+    case Stmt::ArrayTypeTraitExprClass:
+    case Stmt::ExpressionTraitExprClass:
+    case Stmt::UnresolvedLookupExprClass:
+    case Stmt::UnresolvedMemberExprClass:
+    case Stmt::CXXNoexceptExprClass:
+    case Stmt::PackExpansionExprClass:
+    case Stmt::SubstNonTypeTemplateParmPackExprClass:
+    case Stmt::FunctionParmPackExprClass:
+    case Stmt::SEHTryStmtClass:
+    case Stmt::SEHExceptStmtClass:
+    case Stmt::LambdaExprClass:
+    case Stmt::SEHFinallyStmtClass: {
+      const ExplodedNode *node = Bldr.generateSink(S, Pred, Pred->getState());
+      Engine.addAbortedBlock(node, currBldrCtx->getBlock());
+      break;
+    }
+    
+    case Stmt::ParenExprClass:
+      llvm_unreachable("ParenExprs already handled.");
+    case Stmt::GenericSelectionExprClass:
+      llvm_unreachable("GenericSelectionExprs already handled.");
+    // Cases that should never be evaluated simply because they shouldn't
+    // appear in the CFG.
+    case Stmt::BreakStmtClass:
+    case Stmt::CaseStmtClass:
+    case Stmt::CompoundStmtClass:
+    case Stmt::ContinueStmtClass:
+    case Stmt::CXXForRangeStmtClass:
+    case Stmt::DefaultStmtClass:
+    case Stmt::DoStmtClass:
+    case Stmt::ForStmtClass:
+    case Stmt::GotoStmtClass:
+    case Stmt::IfStmtClass:
+    case Stmt::IndirectGotoStmtClass:
+    case Stmt::LabelStmtClass:
+    case Stmt::AttributedStmtClass:
+    case Stmt::NoStmtClass:
+    case Stmt::NullStmtClass:
+    case Stmt::SwitchStmtClass:
+    case Stmt::WhileStmtClass:
+    case Expr::MSDependentExistsStmtClass:
+    case Stmt::CapturedStmtClass:
+      llvm_unreachable("Stmt should not be in analyzer evaluation loop");
+
+    case Stmt::ObjCSubscriptRefExprClass:
+    case Stmt::ObjCPropertyRefExprClass:
+      llvm_unreachable("These are handled by PseudoObjectExpr");
+
+    case Stmt::GNUNullExprClass: {
+      // GNU __null is a pointer-width integer, not an actual pointer.
+      ProgramStateRef state = Pred->getState();
+      state = state->BindExpr(S, Pred->getLocationContext(),
+                              svalBuilder.makeIntValWithPtrWidth(0, false));
+      Bldr.generateNode(S, Pred, state);
+      break;
+    }
+
+    case Stmt::ObjCAtSynchronizedStmtClass:
+      Bldr.takeNodes(Pred);
+      VisitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::ExprWithCleanupsClass:
+      // Handled due to fully linearised CFG.
+      break;
+
+    // Cases not handled yet; but will handle some day.
+    case Stmt::DesignatedInitExprClass:
+    case Stmt::ExtVectorElementExprClass:
+    case Stmt::ImaginaryLiteralClass:
+    case Stmt::ObjCAtCatchStmtClass:
+    case Stmt::ObjCAtFinallyStmtClass:
+    case Stmt::ObjCAtTryStmtClass:
+    case Stmt::ObjCAutoreleasePoolStmtClass:
+    case Stmt::ObjCEncodeExprClass:
+    case Stmt::ObjCIsaExprClass:
+    case Stmt::ObjCProtocolExprClass:
+    case Stmt::ObjCSelectorExprClass:
+    case Stmt::ParenListExprClass:
+    case Stmt::PredefinedExprClass:
+    case Stmt::ShuffleVectorExprClass:
+    case Stmt::VAArgExprClass:
+    case Stmt::CUDAKernelCallExprClass:
+    case Stmt::OpaqueValueExprClass:
+    case Stmt::AsTypeExprClass:
+    case Stmt::AtomicExprClass:
+      // Fall through.
+
+    // Cases we intentionally don't evaluate, since they don't need
+    // to be explicitly evaluated.
+    case Stmt::AddrLabelExprClass:
+    case Stmt::IntegerLiteralClass:
+    case Stmt::CharacterLiteralClass:
+    case Stmt::ImplicitValueInitExprClass:
+    case Stmt::CXXScalarValueInitExprClass:
+    case Stmt::CXXBoolLiteralExprClass:
+    case Stmt::ObjCBoolLiteralExprClass:
+    case Stmt::FloatingLiteralClass:
+    case Stmt::SizeOfPackExprClass:
+    case Stmt::StringLiteralClass:
+    case Stmt::ObjCStringLiteralClass:
+    case Stmt::CXXBindTemporaryExprClass:
+    case Stmt::SubstNonTypeTemplateParmExprClass:
+    case Stmt::CXXNullPtrLiteralExprClass: {
+      Bldr.takeNodes(Pred);
+      ExplodedNodeSet preVisit;
+      getCheckerManager().runCheckersForPreStmt(preVisit, Pred, S, *this);
+      getCheckerManager().runCheckersForPostStmt(Dst, preVisit, S, *this);
+      Bldr.addNodes(Dst);
+      break;
+    }
+
+    case Stmt::CXXDefaultArgExprClass: {
+      Bldr.takeNodes(Pred);
+      ExplodedNodeSet PreVisit;
+      getCheckerManager().runCheckersForPreStmt(PreVisit, Pred, S, *this);
+
+      ExplodedNodeSet Tmp;
+      StmtNodeBuilder Bldr2(PreVisit, Tmp, *currBldrCtx);
+
+      const LocationContext *LCtx = Pred->getLocationContext();
+      const CXXDefaultArgExpr *DefaultE = cast<CXXDefaultArgExpr>(S);
+      const Expr *ArgE = DefaultE->getExpr();
+
+      bool IsTemporary = false;
+      if (const MaterializeTemporaryExpr *MTE =
+            dyn_cast<MaterializeTemporaryExpr>(ArgE)) {
+        ArgE = MTE->GetTemporaryExpr();
+        IsTemporary = true;
+      }
+
+      Optional<SVal> ConstantVal = svalBuilder.getConstantVal(ArgE);
+      if (!ConstantVal)
+        ConstantVal = UnknownVal();
+
+      for (ExplodedNodeSet::iterator I = PreVisit.begin(), E = PreVisit.end();
+           I != E; ++I) {
+        ProgramStateRef State = (*I)->getState();
+        State = State->BindExpr(DefaultE, LCtx, *ConstantVal);
+        if (IsTemporary)
+          State = createTemporaryRegionIfNeeded(State, LCtx, DefaultE,
+                                                DefaultE);
+        Bldr2.generateNode(S, *I, State);
+      }
+
+      getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this);
+      Bldr.addNodes(Dst);
+      break;
+    }
+
+    case Expr::ObjCArrayLiteralClass:
+    case Expr::ObjCDictionaryLiteralClass:
+      // FIXME: explicitly model with a region and the actual contents
+      // of the container.  For now, conjure a symbol.
+    case Expr::ObjCBoxedExprClass: {
+      Bldr.takeNodes(Pred);
+
+      ExplodedNodeSet preVisit;
+      getCheckerManager().runCheckersForPreStmt(preVisit, Pred, S, *this);
+      
+      ExplodedNodeSet Tmp;
+      StmtNodeBuilder Bldr2(preVisit, Tmp, *currBldrCtx);
+
+      const Expr *Ex = cast<Expr>(S);
+      QualType resultType = Ex->getType();
+
+      for (ExplodedNodeSet::iterator it = preVisit.begin(), et = preVisit.end();
+           it != et; ++it) {      
+        ExplodedNode *N = *it;
+        const LocationContext *LCtx = N->getLocationContext();
+        SVal result = svalBuilder.conjureSymbolVal(0, Ex, LCtx, resultType,
+                                                   currBldrCtx->blockCount());
+        ProgramStateRef state = N->getState()->BindExpr(Ex, LCtx, result);
+        Bldr2.generateNode(S, N, state);
+      }
+      
+      getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this);
+      Bldr.addNodes(Dst);
+      break;      
+    }
+
+    case Stmt::ArraySubscriptExprClass:
+      Bldr.takeNodes(Pred);
+      VisitLvalArraySubscriptExpr(cast<ArraySubscriptExpr>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::GCCAsmStmtClass:
+      Bldr.takeNodes(Pred);
+      VisitGCCAsmStmt(cast<GCCAsmStmt>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::MSAsmStmtClass:
+      Bldr.takeNodes(Pred);
+      VisitMSAsmStmt(cast<MSAsmStmt>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::BlockExprClass:
+      Bldr.takeNodes(Pred);
+      VisitBlockExpr(cast<BlockExpr>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::BinaryOperatorClass: {
+      const BinaryOperator* B = cast<BinaryOperator>(S);
+      if (B->isLogicalOp()) {
+        Bldr.takeNodes(Pred);
+        VisitLogicalExpr(B, Pred, Dst);
+        Bldr.addNodes(Dst);
+        break;
+      }
+      else if (B->getOpcode() == BO_Comma) {
+        ProgramStateRef state = Pred->getState();
+        Bldr.generateNode(B, Pred,
+                          state->BindExpr(B, Pred->getLocationContext(),
+                                          state->getSVal(B->getRHS(),
+                                                  Pred->getLocationContext())));
+        break;
+      }
+
+      Bldr.takeNodes(Pred);
+      
+      if (AMgr.options.eagerlyAssumeBinOpBifurcation &&
+          (B->isRelationalOp() || B->isEqualityOp())) {
+        ExplodedNodeSet Tmp;
+        VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Tmp);
+        evalEagerlyAssumeBinOpBifurcation(Dst, Tmp, cast<Expr>(S));
+      }
+      else
+        VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
+
+      Bldr.addNodes(Dst);
+      break;
+    }
+
+    case Stmt::CXXOperatorCallExprClass: {
+      const CXXOperatorCallExpr *OCE = cast<CXXOperatorCallExpr>(S);
+
+      // For instance method operators, make sure the 'this' argument has a
+      // valid region.
+      const Decl *Callee = OCE->getCalleeDecl();
+      if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(Callee)) {
+        if (MD->isInstance()) {
+          ProgramStateRef State = Pred->getState();
+          const LocationContext *LCtx = Pred->getLocationContext();
+          ProgramStateRef NewState =
+            createTemporaryRegionIfNeeded(State, LCtx, OCE->getArg(0));
+          if (NewState != State) {
+            Pred = Bldr.generateNode(OCE, Pred, NewState, /*Tag=*/0,
+                                     ProgramPoint::PreStmtKind);
+            // Did we cache out?
+            if (!Pred)
+              break;
+          }
+        }
+      }
+      // FALLTHROUGH
+    }
+    case Stmt::CallExprClass:
+    case Stmt::CXXMemberCallExprClass:
+    case Stmt::UserDefinedLiteralClass: {
+      Bldr.takeNodes(Pred);
+      VisitCallExpr(cast<CallExpr>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+    }
+    
+    case Stmt::CXXCatchStmtClass: {
+      Bldr.takeNodes(Pred);
+      VisitCXXCatchStmt(cast<CXXCatchStmt>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+    }
+
+    case Stmt::CXXTemporaryObjectExprClass:
+    case Stmt::CXXConstructExprClass: {      
+      Bldr.takeNodes(Pred);
+      VisitCXXConstructExpr(cast<CXXConstructExpr>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+    }
+
+    case Stmt::CXXNewExprClass: {
+      Bldr.takeNodes(Pred);
+      ExplodedNodeSet PostVisit;
+      VisitCXXNewExpr(cast<CXXNewExpr>(S), Pred, PostVisit);
+      getCheckerManager().runCheckersForPostStmt(Dst, PostVisit, S, *this);
+      Bldr.addNodes(Dst);
+      break;
+    }
+
+    case Stmt::CXXDeleteExprClass: {
+      Bldr.takeNodes(Pred);
+      ExplodedNodeSet PreVisit;
+      const CXXDeleteExpr *CDE = cast<CXXDeleteExpr>(S);
+      getCheckerManager().runCheckersForPreStmt(PreVisit, Pred, S, *this);
+
+      for (ExplodedNodeSet::iterator i = PreVisit.begin(), 
+                                     e = PreVisit.end(); i != e ; ++i)
+        VisitCXXDeleteExpr(CDE, *i, Dst);
+
+      Bldr.addNodes(Dst);
+      break;
+    }
+      // FIXME: ChooseExpr is really a constant.  We need to fix
+      //        the CFG so it does not model them as explicit control-flow.
+
+    case Stmt::ChooseExprClass: { // __builtin_choose_expr
+      Bldr.takeNodes(Pred);
+      const ChooseExpr *C = cast<ChooseExpr>(S);
+      VisitGuardedExpr(C, C->getLHS(), C->getRHS(), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+    }
+
+    case Stmt::CompoundAssignOperatorClass:
+      Bldr.takeNodes(Pred);
+      VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::CompoundLiteralExprClass:
+      Bldr.takeNodes(Pred);
+      VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::BinaryConditionalOperatorClass:
+    case Stmt::ConditionalOperatorClass: { // '?' operator
+      Bldr.takeNodes(Pred);
+      const AbstractConditionalOperator *C
+        = cast<AbstractConditionalOperator>(S);
+      VisitGuardedExpr(C, C->getTrueExpr(), C->getFalseExpr(), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+    }
+
+    case Stmt::CXXThisExprClass:
+      Bldr.takeNodes(Pred);
+      VisitCXXThisExpr(cast<CXXThisExpr>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::DeclRefExprClass: {
+      Bldr.takeNodes(Pred);
+      const DeclRefExpr *DE = cast<DeclRefExpr>(S);
+      VisitCommonDeclRefExpr(DE, DE->getDecl(), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+    }
+
+    case Stmt::DeclStmtClass:
+      Bldr.takeNodes(Pred);
+      VisitDeclStmt(cast<DeclStmt>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::ImplicitCastExprClass:
+    case Stmt::CStyleCastExprClass:
+    case Stmt::CXXStaticCastExprClass:
+    case Stmt::CXXDynamicCastExprClass:
+    case Stmt::CXXReinterpretCastExprClass:
+    case Stmt::CXXConstCastExprClass:
+    case Stmt::CXXFunctionalCastExprClass: 
+    case Stmt::ObjCBridgedCastExprClass: {
+      Bldr.takeNodes(Pred);
+      const CastExpr *C = cast<CastExpr>(S);
+      // Handle the previsit checks.
+      ExplodedNodeSet dstPrevisit;
+      getCheckerManager().runCheckersForPreStmt(dstPrevisit, Pred, C, *this);
+      
+      // Handle the expression itself.
+      ExplodedNodeSet dstExpr;
+      for (ExplodedNodeSet::iterator i = dstPrevisit.begin(),
+                                     e = dstPrevisit.end(); i != e ; ++i) { 
+        VisitCast(C, C->getSubExpr(), *i, dstExpr);
+      }
+
+      // Handle the postvisit checks.
+      getCheckerManager().runCheckersForPostStmt(Dst, dstExpr, C, *this);
+      Bldr.addNodes(Dst);
+      break;
+    }
+
+    case Expr::MaterializeTemporaryExprClass: {
+      Bldr.takeNodes(Pred);
+      const MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(S);
+      CreateCXXTemporaryObject(MTE, Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+    }
+      
+    case Stmt::InitListExprClass:
+      Bldr.takeNodes(Pred);
+      VisitInitListExpr(cast<InitListExpr>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::MemberExprClass:
+      Bldr.takeNodes(Pred);
+      VisitMemberExpr(cast<MemberExpr>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::ObjCIvarRefExprClass:
+      Bldr.takeNodes(Pred);
+      VisitLvalObjCIvarRefExpr(cast<ObjCIvarRefExpr>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::ObjCForCollectionStmtClass:
+      Bldr.takeNodes(Pred);
+      VisitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::ObjCMessageExprClass:
+      Bldr.takeNodes(Pred);
+      VisitObjCMessage(cast<ObjCMessageExpr>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::ObjCAtThrowStmtClass:
+    case Stmt::CXXThrowExprClass:
+      // FIXME: This is not complete.  We basically treat @throw as
+      // an abort.
+      Bldr.generateSink(S, Pred, Pred->getState());
+      break;
+
+    case Stmt::ReturnStmtClass:
+      Bldr.takeNodes(Pred);
+      VisitReturnStmt(cast<ReturnStmt>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::OffsetOfExprClass:
+      Bldr.takeNodes(Pred);
+      VisitOffsetOfExpr(cast<OffsetOfExpr>(S), Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::UnaryExprOrTypeTraitExprClass:
+      Bldr.takeNodes(Pred);
+      VisitUnaryExprOrTypeTraitExpr(cast<UnaryExprOrTypeTraitExpr>(S),
+                                    Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+
+    case Stmt::StmtExprClass: {
+      const StmtExpr *SE = cast<StmtExpr>(S);
+
+      if (SE->getSubStmt()->body_empty()) {
+        // Empty statement expression.
+        assert(SE->getType() == getContext().VoidTy
+               && "Empty statement expression must have void type.");
+        break;
+      }
+
+      if (Expr *LastExpr = dyn_cast<Expr>(*SE->getSubStmt()->body_rbegin())) {
+        ProgramStateRef state = Pred->getState();
+        Bldr.generateNode(SE, Pred,
+                          state->BindExpr(SE, Pred->getLocationContext(),
+                                          state->getSVal(LastExpr,
+                                                  Pred->getLocationContext())));
+      }
+      break;
+    }
+
+    case Stmt::UnaryOperatorClass: {
+      Bldr.takeNodes(Pred);
+      const UnaryOperator *U = cast<UnaryOperator>(S);
+      if (AMgr.options.eagerlyAssumeBinOpBifurcation && (U->getOpcode() == UO_LNot)) {
+        ExplodedNodeSet Tmp;
+        VisitUnaryOperator(U, Pred, Tmp);
+        evalEagerlyAssumeBinOpBifurcation(Dst, Tmp, U);
+      }
+      else
+        VisitUnaryOperator(U, Pred, Dst);
+      Bldr.addNodes(Dst);
+      break;
+    }
+
+    case Stmt::PseudoObjectExprClass: {
+      Bldr.takeNodes(Pred);
+      ProgramStateRef state = Pred->getState();
+      const PseudoObjectExpr *PE = cast<PseudoObjectExpr>(S);
+      if (const Expr *Result = PE->getResultExpr()) { 
+        SVal V = state->getSVal(Result, Pred->getLocationContext());
+        Bldr.generateNode(S, Pred,
+                          state->BindExpr(S, Pred->getLocationContext(), V));
+      }
+      else
+        Bldr.generateNode(S, Pred,
+                          state->BindExpr(S, Pred->getLocationContext(),
+                                                   UnknownVal()));
+
+      Bldr.addNodes(Dst);
+      break;
+    }
+  }
+}
+
+bool ExprEngine::replayWithoutInlining(ExplodedNode *N,
+                                       const LocationContext *CalleeLC) {
+  const StackFrameContext *CalleeSF = CalleeLC->getCurrentStackFrame();
+  const StackFrameContext *CallerSF = CalleeSF->getParent()->getCurrentStackFrame();
+  assert(CalleeSF && CallerSF);
+  ExplodedNode *BeforeProcessingCall = 0;
+  const Stmt *CE = CalleeSF->getCallSite();
+
+  // Find the first node before we started processing the call expression.
+  while (N) {
+    ProgramPoint L = N->getLocation();
+    BeforeProcessingCall = N;
+    N = N->pred_empty() ? NULL : *(N->pred_begin());
+
+    // Skip the nodes corresponding to the inlined code.
+    if (L.getLocationContext()->getCurrentStackFrame() != CallerSF)
+      continue;
+    // We reached the caller. Find the node right before we started
+    // processing the call.
+    if (L.isPurgeKind())
+      continue;
+    if (L.getAs<PreImplicitCall>())
+      continue;
+    if (L.getAs<CallEnter>())
+      continue;
+    if (Optional<StmtPoint> SP = L.getAs<StmtPoint>())
+      if (SP->getStmt() == CE)
+        continue;
+    break;
+  }
+
+  if (!BeforeProcessingCall)
+    return false;
+
+  // TODO: Clean up the unneeded nodes.
+
+  // Build an Epsilon node from which we will restart the analysis.
+  // Note that CE is permitted to be NULL!
+  ProgramPoint NewNodeLoc =
+               EpsilonPoint(BeforeProcessingCall->getLocationContext(), CE);
+  // Add the special flag to GDM to signal retrying with no inlining.
+  // Note, changing the state ensures that we are not going to cache out.
+  ProgramStateRef NewNodeState = BeforeProcessingCall->getState();
+  NewNodeState =
+    NewNodeState->set<ReplayWithoutInlining>(const_cast<Stmt *>(CE));
+
+  // Make the new node a successor of BeforeProcessingCall.
+  bool IsNew = false;
+  ExplodedNode *NewNode = G.getNode(NewNodeLoc, NewNodeState, false, &IsNew);
+  // We cached out at this point. Caching out is common due to us backtracking
+  // from the inlined function, which might spawn several paths.
+  if (!IsNew)
+    return true;
+
+  NewNode->addPredecessor(BeforeProcessingCall, G);
+
+  // Add the new node to the work list.
+  Engine.enqueueStmtNode(NewNode, CalleeSF->getCallSiteBlock(),
+                                  CalleeSF->getIndex());
+  NumTimesRetriedWithoutInlining++;
+  return true;
+}
+
+/// Block entrance.  (Update counters).
+void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
+                                         NodeBuilderWithSinks &nodeBuilder, 
+                                         ExplodedNode *Pred) {
+  
+  // FIXME: Refactor this into a checker.
+  if (nodeBuilder.getContext().blockCount() >= AMgr.options.maxBlockVisitOnPath) {
+    static SimpleProgramPointTag tag("ExprEngine : Block count exceeded");
+    const ExplodedNode *Sink =
+                   nodeBuilder.generateSink(Pred->getState(), Pred, &tag);
+
+    // Check if we stopped at the top level function or not.
+    // Root node should have the location context of the top most function.
+    const LocationContext *CalleeLC = Pred->getLocation().getLocationContext();
+    const LocationContext *CalleeSF = CalleeLC->getCurrentStackFrame();
+    const LocationContext *RootLC =
+                        (*G.roots_begin())->getLocation().getLocationContext();
+    if (RootLC->getCurrentStackFrame() != CalleeSF) {
+      Engine.FunctionSummaries->markReachedMaxBlockCount(CalleeSF->getDecl());
+
+      // Re-run the call evaluation without inlining it, by storing the
+      // no-inlining policy in the state and enqueuing the new work item on
+      // the list. Replay should almost never fail. Use the stats to catch it
+      // if it does.
+      if ((!AMgr.options.NoRetryExhausted &&
+           replayWithoutInlining(Pred, CalleeLC)))
+        return;
+      NumMaxBlockCountReachedInInlined++;
+    } else
+      NumMaxBlockCountReached++;
+
+    // Mark sink nodes as exhausted (for stats) only if the retry failed.
+    Engine.blocksExhausted.push_back(std::make_pair(L, Sink));
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Branch processing.
+//===----------------------------------------------------------------------===//
+
+/// RecoverCastedSymbol - A helper function for ProcessBranch that is used
+/// to try to recover some path-sensitivity for casts of symbolic
+/// integers that promote their values (which are currently not tracked well).
+/// This function returns the SVal bound to Condition->IgnoreCasts if all the
+/// cast(s) did was sign-extend the original value.
+static SVal RecoverCastedSymbol(ProgramStateManager& StateMgr,
+                                ProgramStateRef state,
+                                const Stmt *Condition,
+                                const LocationContext *LCtx,
+                                ASTContext &Ctx) {
+
+  const Expr *Ex = dyn_cast<Expr>(Condition);
+  if (!Ex)
+    return UnknownVal();
+
+  uint64_t bits = 0;
+  bool bitsInit = false;
+
+  while (const CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
+    QualType T = CE->getType();
+
+    if (!T->isIntegralOrEnumerationType())
+      return UnknownVal();
+
+    uint64_t newBits = Ctx.getTypeSize(T);
+    if (!bitsInit || newBits < bits) {
+      bitsInit = true;
+      bits = newBits;
+    }
+
+    Ex = CE->getSubExpr();
+  }
+
+  // We reached a non-cast.  Is it a symbolic value?
+  QualType T = Ex->getType();
+
+  if (!bitsInit || !T->isIntegralOrEnumerationType() ||
+      Ctx.getTypeSize(T) > bits)
+    return UnknownVal();
+
+  return state->getSVal(Ex, LCtx);
+}
+
+static const Stmt *ResolveCondition(const Stmt *Condition,
+                                    const CFGBlock *B) {
+  if (const Expr *Ex = dyn_cast<Expr>(Condition))
+    Condition = Ex->IgnoreParens();
+
+  const BinaryOperator *BO = dyn_cast<BinaryOperator>(Condition);
+  if (!BO || !BO->isLogicalOp())
+    return Condition;
+
+  // For logical operations, we still have the case where some branches
+  // use the traditional "merge" approach and others sink the branch
+  // directly into the basic blocks representing the logical operation.
+  // We need to distinguish between those two cases here.
+
+  // The invariants are still shifting, but it is possible that the
+  // last element in a CFGBlock is not a CFGStmt.  Look for the last
+  // CFGStmt as the value of the condition.
+  CFGBlock::const_reverse_iterator I = B->rbegin(), E = B->rend();
+  for (; I != E; ++I) {
+    CFGElement Elem = *I;
+    Optional<CFGStmt> CS = Elem.getAs<CFGStmt>();
+    if (!CS)
+      continue;
+    if (CS->getStmt() != Condition)
+      break;
+    return Condition;
+  }
+
+  assert(I != E);
+
+  while (Condition) {
+    BO = dyn_cast<BinaryOperator>(Condition);
+    if (!BO || !BO->isLogicalOp())
+      return Condition;
+    Condition = BO->getRHS()->IgnoreParens();
+  }
+  llvm_unreachable("could not resolve condition");
+}
+
+void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
+                               NodeBuilderContext& BldCtx,
+                               ExplodedNode *Pred,
+                               ExplodedNodeSet &Dst,
+                               const CFGBlock *DstT,
+                               const CFGBlock *DstF) {
+  currBldrCtx = &BldCtx;
+
+  // Check for NULL conditions; e.g. "for(;;)"
+  if (!Condition) {
+    BranchNodeBuilder NullCondBldr(Pred, Dst, BldCtx, DstT, DstF);
+    NullCondBldr.markInfeasible(false);
+    NullCondBldr.generateNode(Pred->getState(), true, Pred);
+    return;
+  }
+
+
+  // Resolve the condition in the presence of nested '||' and '&&'.
+  if (const Expr *Ex = dyn_cast<Expr>(Condition))
+    Condition = Ex->IgnoreParens();
+
+  Condition = ResolveCondition(Condition, BldCtx.getBlock());
+  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+                                Condition->getLocStart(),
+                                "Error evaluating branch");
+
+  ExplodedNodeSet CheckersOutSet;
+  getCheckerManager().runCheckersForBranchCondition(Condition, CheckersOutSet,
+                                                    Pred, *this);
+  // We generated only sinks.
+  if (CheckersOutSet.empty())
+    return;
+
+  BranchNodeBuilder builder(CheckersOutSet, Dst, BldCtx, DstT, DstF);
+  for (NodeBuilder::iterator I = CheckersOutSet.begin(),
+                             E = CheckersOutSet.end(); E != I; ++I) {
+    ExplodedNode *PredI = *I;
+
+    if (PredI->isSink())
+      continue;
+
+    ProgramStateRef PrevState = PredI->getState();
+    SVal X = PrevState->getSVal(Condition, PredI->getLocationContext());
+
+    if (X.isUnknownOrUndef()) {
+      // Give it a chance to recover from unknown.
+      if (const Expr *Ex = dyn_cast<Expr>(Condition)) {
+        if (Ex->getType()->isIntegralOrEnumerationType()) {
+          // Try to recover some path-sensitivity.  Right now casts of symbolic
+          // integers that promote their values are currently not tracked well.
+          // If 'Condition' is such an expression, try and recover the
+          // underlying value and use that instead.
+          SVal recovered = RecoverCastedSymbol(getStateManager(),
+                                               PrevState, Condition,
+                                               PredI->getLocationContext(),
+                                               getContext());
+
+          if (!recovered.isUnknown()) {
+            X = recovered;
+          }
+        }
+      }
+    }
+    
+    // If the condition is still unknown, give up.
+    if (X.isUnknownOrUndef()) {
+      builder.generateNode(PrevState, true, PredI);
+      builder.generateNode(PrevState, false, PredI);
+      continue;
+    }
+
+    DefinedSVal V = X.castAs<DefinedSVal>();
+
+    ProgramStateRef StTrue, StFalse;
+    tie(StTrue, StFalse) = PrevState->assume(V);
+
+    // Process the true branch.
+    if (builder.isFeasible(true)) {
+      if (StTrue)
+        builder.generateNode(StTrue, true, PredI);
+      else
+        builder.markInfeasible(true);
+    }
+
+    // Process the false branch.
+    if (builder.isFeasible(false)) {
+      if (StFalse)
+        builder.generateNode(StFalse, false, PredI);
+      else
+        builder.markInfeasible(false);
+    }
+  }
+  currBldrCtx = 0;
+}
+
+/// The GDM component containing the set of global variables which have been
+/// previously initialized with explicit initializers.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(InitializedGlobalsSet,
+                                 llvm::ImmutableSet<const VarDecl *>)
+
+void ExprEngine::processStaticInitializer(const DeclStmt *DS,
+                                          NodeBuilderContext &BuilderCtx,
+                                          ExplodedNode *Pred,
+                                          clang::ento::ExplodedNodeSet &Dst,
+                                          const CFGBlock *DstT,
+                                          const CFGBlock *DstF) {
+  currBldrCtx = &BuilderCtx;
+
+  const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+  ProgramStateRef state = Pred->getState();
+  bool initHasRun = state->contains<InitializedGlobalsSet>(VD);
+  BranchNodeBuilder builder(Pred, Dst, BuilderCtx, DstT, DstF);
+
+  if (!initHasRun) {
+    state = state->add<InitializedGlobalsSet>(VD);
+  }
+
+  builder.generateNode(state, initHasRun, Pred);
+  builder.markInfeasible(!initHasRun);
+
+  currBldrCtx = 0;
+}
+
+/// processIndirectGoto - Called by CoreEngine.  Used to generate successor
+///  nodes by processing the 'effects' of a computed goto jump.
+void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
+
+  ProgramStateRef state = builder.getState();
+  SVal V = state->getSVal(builder.getTarget(), builder.getLocationContext());
+
+  // Three possibilities:
+  //
+  //   (1) We know the computed label.
+  //   (2) The label is NULL (or some other constant), or Undefined.
+  //   (3) We have no clue about the label.  Dispatch to all targets.
+  //
+
+  typedef IndirectGotoNodeBuilder::iterator iterator;
+
+  if (Optional<loc::GotoLabel> LV = V.getAs<loc::GotoLabel>()) {
+    const LabelDecl *L = LV->getLabel();
+
+    for (iterator I = builder.begin(), E = builder.end(); I != E; ++I) {
+      if (I.getLabel() == L) {
+        builder.generateNode(I, state);
+        return;
+      }
+    }
+
+    llvm_unreachable("No block with label.");
+  }
+
+  if (V.getAs<loc::ConcreteInt>() || V.getAs<UndefinedVal>()) {
+    // Dispatch to the first target and mark it as a sink.
+    //ExplodedNode* N = builder.generateNode(builder.begin(), state, true);
+    // FIXME: add checker visit.
+    //    UndefBranches.insert(N);
+    return;
+  }
+
+  // This is really a catch-all.  We don't support symbolics yet.
+  // FIXME: Implement dispatch for symbolic pointers.
+
+  for (iterator I=builder.begin(), E=builder.end(); I != E; ++I)
+    builder.generateNode(I, state);
+}
+
+/// processEndOfFunction - Called by CoreEngine.  Used to generate end-of-path
+///  nodes when control reaches the end of a function.
+void ExprEngine::processEndOfFunction(NodeBuilderContext& BC,
+                                      ExplodedNode *Pred) {
+  StateMgr.EndPath(Pred->getState());
+
+  ExplodedNodeSet Dst;
+  if (Pred->getLocationContext()->inTopFrame()) {
+    // Remove dead symbols.
+    ExplodedNodeSet AfterRemovedDead;
+    removeDeadOnEndOfFunction(BC, Pred, AfterRemovedDead);
+
+    // Notify checkers.
+    for (ExplodedNodeSet::iterator I = AfterRemovedDead.begin(),
+        E = AfterRemovedDead.end(); I != E; ++I) {
+      getCheckerManager().runCheckersForEndFunction(BC, Dst, *I, *this);
+    }
+  } else {
+    getCheckerManager().runCheckersForEndFunction(BC, Dst, Pred, *this);
+  }
+
+  Engine.enqueueEndOfFunction(Dst);
+}
+
+/// processSwitch - Called by CoreEngine.  Used to generate successor
+///  nodes by processing the 'effects' of a switch statement.
+void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
+  typedef SwitchNodeBuilder::iterator iterator;
+  ProgramStateRef state = builder.getState();
+  const Expr *CondE = builder.getCondition();
+  SVal  CondV_untested = state->getSVal(CondE, builder.getLocationContext());
+
+  if (CondV_untested.isUndef()) {
+    //ExplodedNode* N = builder.generateDefaultCaseNode(state, true);
+    // FIXME: add checker
+    //UndefBranches.insert(N);
+
+    return;
+  }
+  DefinedOrUnknownSVal CondV = CondV_untested.castAs<DefinedOrUnknownSVal>();
+
+  ProgramStateRef DefaultSt = state;
+  
+  iterator I = builder.begin(), EI = builder.end();
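+  // If there are no 'case' successors at all, the default branch is
+  // trivially feasible.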
+  bool defaultIsFeasible = I == EI;
+
+  for ( ; I != EI; ++I) {
+    // Successor may be pruned out during CFG construction.
+    if (!I.getBlock())
+      continue;
+    
+    const CaseStmt *Case = I.getCase();
+
+    // Evaluate the LHS of the case value.
+    llvm::APSInt V1 = Case->getLHS()->EvaluateKnownConstInt(getContext());
+    assert(V1.getBitWidth() == getContext().getTypeSize(CondE->getType()));
+
+    // Get the RHS of the case, if it exists.
+    llvm::APSInt V2;
+    if (const Expr *E = Case->getRHS())
+      V2 = E->EvaluateKnownConstInt(getContext());
+    else
+      V2 = V1;
+
+    // FIXME: Eventually we should replace the logic below with a range
+    //  comparison, rather than concretize the values within the range.
+    //  This should be easy once we have "ranges" for NonLVals.
+
+    do {
+      nonloc::ConcreteInt CaseVal(getBasicVals().getValue(V1));
+      DefinedOrUnknownSVal Res = svalBuilder.evalEQ(DefaultSt ? DefaultSt : state,
+                                               CondV, CaseVal);
+
+      // Now "assume" that the case matches.
+      if (ProgramStateRef stateNew = state->assume(Res, true)) {
+        builder.generateCaseStmtNode(I, stateNew);
+
+        // If CondV evaluates to a constant, then we know that this
+        // is the *only* case that we can take, so stop evaluating the
+        // others.
+        if (CondV.getAs<nonloc::ConcreteInt>())
+          return;
+      }
+
+      // Now "assume" that the case doesn't match.  Add this state
+      // to the default state (if it is feasible).
+      if (DefaultSt) {
+        if (ProgramStateRef stateNew = DefaultSt->assume(Res, false)) {
+          defaultIsFeasible = true;
+          DefaultSt = stateNew;
+        }
+        else {
+          defaultIsFeasible = false;
+          DefaultSt = NULL;
+        }
+      }
+
+      // Concretize the next value in the range.
+      if (V1 == V2)
+        break;
+
+      ++V1;
+      assert (V1 <= V2);
+
+    } while (true);
+  }
+
+  if (!defaultIsFeasible)
+    return;
+
+  // If we have switch(enum value), the default branch is not
+  // feasible if all of the enum constants not covered by 'case:' statements
+  // are not feasible values for the switch condition.
+  //
+  // Note that this isn't as accurate as it could be.  Even if there isn't
+  // a case for a particular enum value, as long as that enum value isn't a
+  // feasible value of the condition it shouldn't be considered when deciding
+  // whether 'default:' is reachable.
+  const SwitchStmt *SS = builder.getSwitch();
+  const Expr *CondExpr = SS->getCond()->IgnoreParenImpCasts();
+  if (CondExpr->getType()->getAs<EnumType>()) {
+    if (SS->isAllEnumCasesCovered())
+      return;
+  }
+
+  builder.generateDefaultCaseNode(DefaultSt);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer functions: Loads and stores.
+//===----------------------------------------------------------------------===//
+
+void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
+                                        ExplodedNode *Pred,
+                                        ExplodedNodeSet &Dst) {
+  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+
+  ProgramStateRef state = Pred->getState();
+  const LocationContext *LCtx = Pred->getLocationContext();
+
+  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+    assert(Ex->isGLValue());
+    SVal V = state->getLValue(VD, Pred->getLocationContext());
+
+    // For references, the 'lvalue' is the pointer address stored in the
+    // reference region.
+    if (VD->getType()->isReferenceType()) {
+      if (const MemRegion *R = V.getAsRegion())
+        V = state->getSVal(R);
+      else
+        V = UnknownVal();
+    }
+
+    Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), 0,
+                      ProgramPoint::PostLValueKind);
+    return;
+  }
+  if (const EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D)) {
+    assert(!Ex->isGLValue());
+    SVal V = svalBuilder.makeIntVal(ED->getInitVal());
+    Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V));
+    return;
+  }
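+  // References to functions evaluate to a function pointer value.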
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+    SVal V = svalBuilder.getFunctionPointer(FD);
+    Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), 0,
+                      ProgramPoint::PostLValueKind);
+    return;
+  }
+  if (isa<FieldDecl>(D)) {
+    // FIXME: Compute lvalue of field pointers-to-member.
+    // Right now we just use a non-null void pointer, so that it gives proper
+    // results in boolean contexts.
+    SVal V = svalBuilder.conjureSymbolVal(Ex, LCtx, getContext().VoidPtrTy,
+                                          currBldrCtx->blockCount());
+    state = state->assume(V.castAs<DefinedOrUnknownSVal>(), true);
+    Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), 0,
+		      ProgramPoint::PostLValueKind);
+    return;
+  }
+
+  llvm_unreachable("Support for this Decl not implemented.");
+}
+
+/// VisitLvalArraySubscriptExpr - Transfer function for array accesses.
+void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A,
+                                             ExplodedNode *Pred,
+                                             ExplodedNodeSet &Dst){
+
+  const Expr *Base = A->getBase()->IgnoreParens();
+  const Expr *Idx  = A->getIdx()->IgnoreParens();
+  
+
+  ExplodedNodeSet checkerPreStmt;
+  getCheckerManager().runCheckersForPreStmt(checkerPreStmt, Pred, A, *this);
+
+  StmtNodeBuilder Bldr(checkerPreStmt, Dst, *currBldrCtx);
+
+  for (ExplodedNodeSet::iterator it = checkerPreStmt.begin(),
+                                 ei = checkerPreStmt.end(); it != ei; ++it) {
+    const LocationContext *LCtx = (*it)->getLocationContext();
+    ProgramStateRef state = (*it)->getState();
+    SVal V = state->getLValue(A->getType(),
+                              state->getSVal(Idx, LCtx),
+                              state->getSVal(Base, LCtx));
+    assert(A->isGLValue());
+    Bldr.generateNode(A, *it, state->BindExpr(A, LCtx, V), 0, 
+                      ProgramPoint::PostLValueKind);
+  }
+}
+
+/// VisitMemberExpr - Transfer function for member expressions.
+void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
+                                 ExplodedNodeSet &TopDst) {
+
+  StmtNodeBuilder Bldr(Pred, TopDst, *currBldrCtx);
+  ExplodedNodeSet Dst;
+  ValueDecl *Member = M->getMemberDecl();
+
+  // Handle static member variables and enum constants accessed via
+  // member syntax.
+  if (isa<VarDecl>(Member) || isa<EnumConstantDecl>(Member)) {
+    Bldr.takeNodes(Pred);
+    VisitCommonDeclRefExpr(M, Member, Pred, Dst);
+    Bldr.addNodes(Dst);
+    return;
+  }
+
+  ProgramStateRef state = Pred->getState();
+  const LocationContext *LCtx = Pred->getLocationContext();
+  Expr *BaseExpr = M->getBase();
+
+  // Handle C++ method calls.
+  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member)) {
+    if (MD->isInstance())
+      state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr);
+
+    SVal MDVal = svalBuilder.getFunctionPointer(MD);
+    state = state->BindExpr(M, LCtx, MDVal);
+
+    Bldr.generateNode(M, Pred, state);
+    return;
+  }
+
+  // Handle regular struct fields / member variables.
+  state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr);
+  SVal baseExprVal = state->getSVal(BaseExpr, LCtx);
+
+  FieldDecl *field = cast<FieldDecl>(Member);
+  SVal L = state->getLValue(field, baseExprVal);
+  if (M->isGLValue()) {
+    if (field->getType()->isReferenceType()) {
+      if (const MemRegion *R = L.getAsRegion())
+        L = state->getSVal(R);
+      else
+        L = UnknownVal();
+    }
+
+    Bldr.generateNode(M, Pred, state->BindExpr(M, LCtx, L), 0,
+                      ProgramPoint::PostLValueKind);
+  } else {
+    Bldr.takeNodes(Pred);
+    evalLoad(Dst, M, M, Pred, state, L);
+    Bldr.addNodes(Dst);
+  }
+}
+
+namespace {
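+/// Collects every symbol reachable from a given value; used below to
+/// determine which symbols escape when a value is bound.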
+class CollectReachableSymbolsCallback : public SymbolVisitor {
+  InvalidatedSymbols Symbols;
+public:
+  CollectReachableSymbolsCallback(ProgramStateRef State) {}
+  const InvalidatedSymbols &getSymbols() const { return Symbols; }
+
+  bool VisitSymbol(SymbolRef Sym) {
+    Symbols.insert(Sym);
+    return true;
+  }
+};
+} // end anonymous namespace
+
+// A value escapes in three possible cases:
+// (1) We are binding to something that is not a memory region.
+// (2) We are binding to a MemRegion that does not have stack storage.
+// (3) We are binding to a MemRegion with stack storage that the store
+//     does not understand.
+ProgramStateRef ExprEngine::processPointerEscapedOnBind(ProgramStateRef State,
+                                                        SVal Loc, SVal Val) {
+  // Are we storing to something that causes the value to "escape"?
+  bool escapes = true;
+
+  // TODO: Move to StoreManager.
+  if (Optional<loc::MemRegionVal> regionLoc = Loc.getAs<loc::MemRegionVal>()) {
+    escapes = !regionLoc->getRegion()->hasStackStorage();
+
+    if (!escapes) {
+      // To test (3), generate a new state with the binding added.  If it is
+      // the same state, then it escapes (since the store cannot represent
+      // the binding).
+      // Do this only if we know that the store is not supposed to generate the
+      // same state.
+      SVal StoredVal = State->getSVal(regionLoc->getRegion());
+      if (StoredVal != Val)
+        escapes = (State == (State->bindLoc(*regionLoc, Val)));
+    }
+  }
+
+  // If the store can represent the binding and the target has local (stack)
+  // storage, just return and let the simulation state continue as is.
+  if (!escapes)
+    return State;
+
+  // Otherwise, find all symbols referenced by 'val' that we are tracking
+  // and stop tracking them.
+  CollectReachableSymbolsCallback Scanner =
+      State->scanReachableSymbols<CollectReachableSymbolsCallback>(Val);
+  const InvalidatedSymbols &EscapedSymbols = Scanner.getSymbols();
+  State = getCheckerManager().runCheckersForPointerEscape(State,
+                                                          EscapedSymbols,
+                                                          /*CallEvent*/ 0,
+                                                          PSK_EscapeOnBind);
+
+  return State;
+}
+
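+/// notifyCheckersOfPointerEscape - Notify checkers about symbols that escaped
+/// through an invalidation event.  For calls, symbols from regions passed
+/// directly as arguments are reported as direct escapes; the remaining
+/// invalidated symbols are reported as indirect escapes.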
+ProgramStateRef 
+ExprEngine::notifyCheckersOfPointerEscape(ProgramStateRef State,
+    const InvalidatedSymbols *Invalidated,
+    ArrayRef<const MemRegion *> ExplicitRegions,
+    ArrayRef<const MemRegion *> Regions,
+    const CallEvent *Call,
+    bool IsConst) {
+  
+  if (!Invalidated || Invalidated->empty())
+    return State;
+
+  if (!Call)
+    return getCheckerManager().runCheckersForPointerEscape(State,
+                                                           *Invalidated,
+                                                           0,
+                                                           PSK_EscapeOther,
+                                                           IsConst);
+
+  // Note: Due to current limitations of RegionStore, we only process the top
+  // level const pointers correctly. The lower level const pointers are
+  // currently treated as non-const.
+  if (IsConst)
+    return getCheckerManager().runCheckersForPointerEscape(State,
+                                                        *Invalidated,
+                                                        Call,
+                                                        PSK_DirectEscapeOnCall,
+                                                        true);
+
+  // If the symbols were invalidated by a call, we want to find out which ones 
+  // were invalidated directly due to being arguments to the call.
+  InvalidatedSymbols SymbolsDirectlyInvalidated;
+  for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
+      E = ExplicitRegions.end(); I != E; ++I) {
+    if (const SymbolicRegion *R = (*I)->StripCasts()->getAs<SymbolicRegion>())
+      SymbolsDirectlyInvalidated.insert(R->getSymbol());
+  }
+
+  InvalidatedSymbols SymbolsIndirectlyInvalidated;
+  for (InvalidatedSymbols::const_iterator I=Invalidated->begin(),
+      E = Invalidated->end(); I!=E; ++I) {
+    SymbolRef sym = *I;
+    if (SymbolsDirectlyInvalidated.count(sym))
+      continue;
+    SymbolsIndirectlyInvalidated.insert(sym);
+  }
+
+  if (!SymbolsDirectlyInvalidated.empty())
+    State = getCheckerManager().runCheckersForPointerEscape(State,
+        SymbolsDirectlyInvalidated, Call, PSK_DirectEscapeOnCall);
+
+  // Notify about the symbols that get indirectly invalidated by the call.
+  if (!SymbolsIndirectlyInvalidated.empty())
+    State = getCheckerManager().runCheckersForPointerEscape(State,
+        SymbolsIndirectlyInvalidated, Call, PSK_IndirectEscapeOnCall);
+
+  return State;
+}
+
+/// evalBind - Handle the semantics of binding a value to a specific location.
+///  This method is used by evalStore, VisitDeclStmt, and others.
+void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
+                          ExplodedNode *Pred,
+                          SVal location, SVal Val,
+                          bool atDeclInit, const ProgramPoint *PP) {
+
+  const LocationContext *LC = Pred->getLocationContext();
+  PostStmt PS(StoreE, LC);
+  if (!PP)
+    PP = &PS;
+
+  // Do a previsit of the bind.
+  ExplodedNodeSet CheckedSet;
+  getCheckerManager().runCheckersForBind(CheckedSet, Pred, location, Val,
+                                         StoreE, *this, *PP);
+
+
+  StmtNodeBuilder Bldr(CheckedSet, Dst, *currBldrCtx);
+
+  // If the location is not a 'Loc', it will already be handled by
+  // the checkers.  There is nothing left to do.
+  if (!location.getAs<Loc>()) {
+    const ProgramPoint L = PostStore(StoreE, LC, /*Loc*/0, /*tag*/0);
+    ProgramStateRef state = Pred->getState();
+    state = processPointerEscapedOnBind(state, location, Val);
+    Bldr.generateNode(L, state, Pred);
+    return;
+  }
+  
+
+  for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
+       I!=E; ++I) {
+    ExplodedNode *PredI = *I;
+    ProgramStateRef state = PredI->getState();
+    
+    state = processPointerEscapedOnBind(state, location, Val);
+
+    // When binding the value, pass on the hint that this is an initialization.
+    // For initializations, we do not need to inform clients of region
+    // changes.
+    state = state->bindLoc(location.castAs<Loc>(),
+                           Val, /* notifyChanges = */ !atDeclInit);
+
+    const MemRegion *LocReg = 0;
+    if (Optional<loc::MemRegionVal> LocRegVal =
+            location.getAs<loc::MemRegionVal>()) {
+      LocReg = LocRegVal->getRegion();
+    }
+    
+    const ProgramPoint L = PostStore(StoreE, LC, LocReg, 0);
+    Bldr.generateNode(L, state, PredI);
+  }
+}
+
+/// evalStore - Handle the semantics of a store via an assignment.
+///  @param Dst The node set to store generated state nodes
+///  @param AssignE The assignment expression if the store happens in an
+///         assignment.
+///  @param LocationE The location expression that is stored to.
+///  @param state The current simulation state
+///  @param location The location to store the value
+///  @param Val The value to be stored
+void ExprEngine::evalStore(ExplodedNodeSet &Dst, const Expr *AssignE,
+                             const Expr *LocationE,
+                             ExplodedNode *Pred,
+                             ProgramStateRef state, SVal location, SVal Val,
+                             const ProgramPointTag *tag) {
+  // Proceed with the store.  We use AssignE as the anchor for the PostStore
+  // ProgramPoint if it is non-NULL, and LocationE otherwise.
+  const Expr *StoreE = AssignE ? AssignE : LocationE;
+
+  // Evaluate the location (checks for bad dereferences).
+  ExplodedNodeSet Tmp;
+  evalLocation(Tmp, AssignE, LocationE, Pred, state, location, tag, false);
+
+  if (Tmp.empty())
+    return;
+
+  if (location.isUndef())
+    return;
+
+  for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI)
+    evalBind(Dst, StoreE, *NI, location, Val, false);
+}
+
+void ExprEngine::evalLoad(ExplodedNodeSet &Dst,
+                          const Expr *NodeEx,
+                          const Expr *BoundEx,
+                          ExplodedNode *Pred,
+                          ProgramStateRef state,
+                          SVal location,
+                          const ProgramPointTag *tag,
+                          QualType LoadTy)
+{
+  assert(!location.getAs<NonLoc>() && "location cannot be a NonLoc.");
+
+  // Are we loading from a region?  This actually results in two loads; one
+  // to fetch the address of the referenced value and one to fetch the
+  // referenced value.
+  if (const TypedValueRegion *TR =
+        dyn_cast_or_null<TypedValueRegion>(location.getAsRegion())) {
+
+    QualType ValTy = TR->getValueType();
+    if (const ReferenceType *RT = ValTy->getAs<ReferenceType>()) {
+      static SimpleProgramPointTag
+             loadReferenceTag("ExprEngine : Load Reference");
+      ExplodedNodeSet Tmp;
+      evalLoadCommon(Tmp, NodeEx, BoundEx, Pred, state,
+                     location, &loadReferenceTag,
+                     getContext().getPointerType(RT->getPointeeType()));
+
+      // Perform the load from the referenced value.
+      for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end() ; I!=E; ++I) {
+        state = (*I)->getState();
+        location = state->getSVal(BoundEx, (*I)->getLocationContext());
+        evalLoadCommon(Dst, NodeEx, BoundEx, *I, state, location, tag, LoadTy);
+      }
+      return;
+    }
+  }
+
+  evalLoadCommon(Dst, NodeEx, BoundEx, Pred, state, location, tag, LoadTy);
+}
+
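+/// evalLoadCommon - Shared implementation of evalLoad: evaluate the location
+/// (checks for bad dereferences), then bind the loaded value to BoundEx at a
+/// PostLoad program point.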
+void ExprEngine::evalLoadCommon(ExplodedNodeSet &Dst,
+                                const Expr *NodeEx,
+                                const Expr *BoundEx,
+                                ExplodedNode *Pred,
+                                ProgramStateRef state,
+                                SVal location,
+                                const ProgramPointTag *tag,
+                                QualType LoadTy) {
+  assert(NodeEx);
+  assert(BoundEx);
+  // Evaluate the location (checks for bad dereferences).
+  ExplodedNodeSet Tmp;
+  evalLocation(Tmp, NodeEx, BoundEx, Pred, state, location, tag, true);
+  if (Tmp.empty())
+    return;
+
+  StmtNodeBuilder Bldr(Tmp, Dst, *currBldrCtx);
+  if (location.isUndef())
+    return;
+
+  // Proceed with the load.
+  for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI) {
+    state = (*NI)->getState();
+    const LocationContext *LCtx = (*NI)->getLocationContext();
+
+    SVal V = UnknownVal();
+    if (location.isValid()) {
+      if (LoadTy.isNull())
+        LoadTy = BoundEx->getType();
+      V = state->getSVal(location.castAs<Loc>(), LoadTy);
+    }
+
+    Bldr.generateNode(NodeEx, *NI, state->BindExpr(BoundEx, LCtx, V), tag,
+                      ProgramPoint::PostLoadKind);
+  }
+}
+
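+/// evalLocation - Run the checkers registered for load/store locations on
+/// 'location' (e.g. checks for bad dereferences) before the access itself is
+/// simulated.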
+void ExprEngine::evalLocation(ExplodedNodeSet &Dst,
+                              const Stmt *NodeEx,
+                              const Stmt *BoundEx,
+                              ExplodedNode *Pred,
+                              ProgramStateRef state,
+                              SVal location,
+                              const ProgramPointTag *tag,
+                              bool isLoad) {
+  StmtNodeBuilder BldrTop(Pred, Dst, *currBldrCtx);
+  // Early checks for performance reasons.
+  if (location.isUnknown()) {
+    return;
+  }
+
+  ExplodedNodeSet Src;
+  BldrTop.takeNodes(Pred);
+  StmtNodeBuilder Bldr(Pred, Src, *currBldrCtx);
+  if (Pred->getState() != state) {
+    // Associate this new state with an ExplodedNode.
+    // FIXME: If I pass null tag, the graph is incorrect, e.g for
+    //   int *p;
+    //   p = 0;
+    //   *p = 0xDEADBEEF;
+    // "p = 0" is not noted as "Null pointer value stored to 'p'" but
+    // instead "int *p" is noted as
+    // "Variable 'p' initialized to a null pointer value"
+    
+    static SimpleProgramPointTag tag("ExprEngine: Location");
+    Bldr.generateNode(NodeEx, Pred, state, &tag);
+  }
+  ExplodedNodeSet Tmp;
+  getCheckerManager().runCheckersForLocation(Tmp, Src, location, isLoad,
+                                             NodeEx, BoundEx, *this);
+  BldrTop.addNodes(Tmp);
+}
+
+std::pair<const ProgramPointTag *, const ProgramPointTag*>
+ExprEngine::geteagerlyAssumeBinOpBifurcationTags() {
+  static SimpleProgramPointTag
+         eagerlyAssumeBinOpBifurcationTrue("ExprEngine : Eagerly Assume True"),
+         eagerlyAssumeBinOpBifurcationFalse("ExprEngine : Eagerly Assume False");
+  return std::make_pair(&eagerlyAssumeBinOpBifurcationTrue,
+                        &eagerlyAssumeBinOpBifurcationFalse);
+}
+
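+/// evalEagerlyAssumeBinOpBifurcation - If the expression evaluates to a
+/// symbolic expression, eagerly bifurcate the state: bind the expression to
+/// 1 on the path where the assumption holds and to 0 on the path where it
+/// does not.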
+void ExprEngine::evalEagerlyAssumeBinOpBifurcation(ExplodedNodeSet &Dst,
+                                                   ExplodedNodeSet &Src,
+                                                   const Expr *Ex) {
+  StmtNodeBuilder Bldr(Src, Dst, *currBldrCtx);
+  
+  for (ExplodedNodeSet::iterator I=Src.begin(), E=Src.end(); I!=E; ++I) {
+    ExplodedNode *Pred = *I;
+    // Test if the previous node was at the same expression.  This can happen
+    // when the expression fails to evaluate to anything meaningful and
+    // (as an optimization) we don't generate a node.
+    ProgramPoint P = Pred->getLocation();
+    if (!P.getAs<PostStmt>() || P.castAs<PostStmt>().getStmt() != Ex) {
+      continue;
+    }
+
+    ProgramStateRef state = Pred->getState();
+    SVal V = state->getSVal(Ex, Pred->getLocationContext());
+    Optional<nonloc::SymbolVal> SEV = V.getAs<nonloc::SymbolVal>();
+    if (SEV && SEV->isExpression()) {
+      const std::pair<const ProgramPointTag *, const ProgramPointTag*> &tags =
+        geteagerlyAssumeBinOpBifurcationTags();
+
+      ProgramStateRef StateTrue, StateFalse;
+      tie(StateTrue, StateFalse) = state->assume(*SEV);
+
+      // First assume that the condition is true.
+      if (StateTrue) {
+        SVal Val = svalBuilder.makeIntVal(1U, Ex->getType());        
+        StateTrue = StateTrue->BindExpr(Ex, Pred->getLocationContext(), Val);
+        Bldr.generateNode(Ex, Pred, StateTrue, tags.first);
+      }
+
+      // Next, assume that the condition is false.
+      if (StateFalse) {
+        SVal Val = svalBuilder.makeIntVal(0U, Ex->getType());
+        StateFalse = StateFalse->BindExpr(Ex, Pred->getLocationContext(), Val);
+        Bldr.generateNode(Ex, Pred, StateFalse, tags.second);
+      }
+    }
+  }
+}
+
+void ExprEngine::VisitGCCAsmStmt(const GCCAsmStmt *A, ExplodedNode *Pred,
+                                 ExplodedNodeSet &Dst) {
+  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+  // We have processed both the inputs and the outputs.  All of the outputs
+  // should evaluate to Locs.  Nuke all of their values.
+
+  // FIXME: Some day in the future it would be nice to allow a "plug-in"
+  // which interprets the inline asm and stores proper results in the
+  // outputs.
+
+  ProgramStateRef state = Pred->getState();
+
+  for (GCCAsmStmt::const_outputs_iterator OI = A->begin_outputs(),
+       OE = A->end_outputs(); OI != OE; ++OI) {
+    SVal X = state->getSVal(*OI, Pred->getLocationContext());
+    assert (!X.getAs<NonLoc>());  // Should be an Lval, or unknown, undef.
+
+    if (Optional<Loc> LV = X.getAs<Loc>())
+      state = state->bindLoc(*LV, UnknownVal());
+  }
+
+  Bldr.generateNode(A, Pred, state);
+}
+
+void ExprEngine::VisitMSAsmStmt(const MSAsmStmt *A, ExplodedNode *Pred,
+                                ExplodedNodeSet &Dst) {
+  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+  Bldr.generateNode(A, Pred, Pred->getState());
+}
+
+//===----------------------------------------------------------------------===//
+// Visualization.
+//===----------------------------------------------------------------------===//
+
+#ifndef NDEBUG
+static ExprEngine* GraphPrintCheckerState;
+static SourceManager* GraphPrintSourceManager;
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<ExplodedNode*> :
+  public DefaultDOTGraphTraits {
+
+  DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+  // FIXME: Since we do not cache error nodes in ExprEngine now, this does not
+  // work.
+  static std::string getNodeAttributes(const ExplodedNode *N, void*) {
+
+#if 0
+      // FIXME: Replace with a general scheme to tell if the node is
+      // an error node.
+    if (GraphPrintCheckerState->isImplicitNullDeref(N) ||
+        GraphPrintCheckerState->isExplicitNullDeref(N) ||
+        GraphPrintCheckerState->isUndefDeref(N) ||
+        GraphPrintCheckerState->isUndefStore(N) ||
+        GraphPrintCheckerState->isUndefControlFlow(N) ||
+        GraphPrintCheckerState->isUndefResult(N) ||
+        GraphPrintCheckerState->isBadCall(N) ||
+        GraphPrintCheckerState->isUndefArg(N))
+      return "color=\"red\",style=\"filled\"";
+
+    if (GraphPrintCheckerState->isNoReturnCall(N))
+      return "color=\"blue\",style=\"filled\"";
+#endif
+    return "";
+  }
+
+  static void printLocation(raw_ostream &Out, SourceLocation SLoc) {
+    if (SLoc.isFileID()) {
+      Out << "\\lline="
+        << GraphPrintSourceManager->getExpansionLineNumber(SLoc)
+        << " col="
+        << GraphPrintSourceManager->getExpansionColumnNumber(SLoc)
+        << "\\l";
+    }
+  }
+
+  static std::string getNodeLabel(const ExplodedNode *N, void*){
+
+    std::string sbuf;
+    llvm::raw_string_ostream Out(sbuf);
+
+    // Program Location.
+    ProgramPoint Loc = N->getLocation();
+
+    switch (Loc.getKind()) {
+      case ProgramPoint::BlockEntranceKind: {
+        Out << "Block Entrance: B"
+            << Loc.castAs<BlockEntrance>().getBlock()->getBlockID();
+        if (const NamedDecl *ND =
+                    dyn_cast<NamedDecl>(Loc.getLocationContext()->getDecl())) {
+          Out << " (";
+          ND->printName(Out);
+          Out << ")";
+        }
+        break;
+      }
+
+      case ProgramPoint::BlockExitKind:
+        assert (false);
+        break;
+
+      case ProgramPoint::CallEnterKind:
+        Out << "CallEnter";
+        break;
+
+      case ProgramPoint::CallExitBeginKind:
+        Out << "CallExitBegin";
+        break;
+
+      case ProgramPoint::CallExitEndKind:
+        Out << "CallExitEnd";
+        break;
+
+      case ProgramPoint::PostStmtPurgeDeadSymbolsKind:
+        Out << "PostStmtPurgeDeadSymbols";
+        break;
+
+      case ProgramPoint::PreStmtPurgeDeadSymbolsKind:
+        Out << "PreStmtPurgeDeadSymbols";
+        break;
+
+      case ProgramPoint::EpsilonKind:
+        Out << "Epsilon Point";
+        break;
+
+      case ProgramPoint::PreImplicitCallKind: {
+        ImplicitCallPoint PC = Loc.castAs<ImplicitCallPoint>();
+        Out << "PreCall: ";
+
+        // FIXME: Get proper printing options.
+        PC.getDecl()->print(Out, LangOptions());
+        printLocation(Out, PC.getLocation());
+        break;
+      }
+
+      case ProgramPoint::PostImplicitCallKind: {
+        ImplicitCallPoint PC = Loc.castAs<ImplicitCallPoint>();
+        Out << "PostCall: ";
+
+        // FIXME: Get proper printing options.
+        PC.getDecl()->print(Out, LangOptions());
+        printLocation(Out, PC.getLocation());
+        break;
+      }
+
+      case ProgramPoint::PostInitializerKind: {
+        Out << "PostInitializer: ";
+        const CXXCtorInitializer *Init =
+          Loc.castAs<PostInitializer>().getInitializer();
+        if (const FieldDecl *FD = Init->getAnyMember())
+          Out << *FD;
+        else {
+          QualType Ty = Init->getTypeSourceInfo()->getType();
+          Ty = Ty.getLocalUnqualifiedType();
+          LangOptions LO; // FIXME.
+          Ty.print(Out, LO);
+        }
+        break;
+      }
+
+      case ProgramPoint::BlockEdgeKind: {
+        const BlockEdge &E = Loc.castAs<BlockEdge>();
+        Out << "Edge: (B" << E.getSrc()->getBlockID() << ", B"
+            << E.getDst()->getBlockID()  << ')';
+
+        if (const Stmt *T = E.getSrc()->getTerminator()) {
+          SourceLocation SLoc = T->getLocStart();
+
+          Out << "\\|Terminator: ";
+          LangOptions LO; // FIXME.
+          E.getSrc()->printTerminator(Out, LO);
+
+          if (SLoc.isFileID()) {
+            Out << "\\lline="
+              << GraphPrintSourceManager->getExpansionLineNumber(SLoc)
+              << " col="
+              << GraphPrintSourceManager->getExpansionColumnNumber(SLoc);
+          }
+
+          if (isa<SwitchStmt>(T)) {
+            const Stmt *Label = E.getDst()->getLabel();
+
+            if (Label) {
+              if (const CaseStmt *C = dyn_cast<CaseStmt>(Label)) {
+                Out << "\\lcase ";
+                LangOptions LO; // FIXME.
+                C->getLHS()->printPretty(Out, 0, PrintingPolicy(LO));
+
+                if (const Stmt *RHS = C->getRHS()) {
+                  Out << " .. ";
+                  RHS->printPretty(Out, 0, PrintingPolicy(LO));
+                }
+
+                Out << ":";
+              }
+              else {
+                assert (isa<DefaultStmt>(Label));
+                Out << "\\ldefault:";
+              }
+            }
+            else
+              Out << "\\l(implicit) default:";
+          }
+          else if (isa<IndirectGotoStmt>(T)) {
+            // FIXME
+          }
+          else {
+            Out << "\\lCondition: ";
+            if (*E.getSrc()->succ_begin() == E.getDst())
+              Out << "true";
+            else
+              Out << "false";
+          }
+
+          Out << "\\l";
+        }
+
+#if 0
+          // FIXME: Replace with a general scheme to determine
+          // the name of the check.
+        if (GraphPrintCheckerState->isUndefControlFlow(N)) {
+          Out << "\\|Control-flow based on\\lUndefined value.\\l";
+        }
+#endif
+        break;
+      }
+
+      default: {
+        const Stmt *S = Loc.castAs<StmtPoint>().getStmt();
+
+        Out << S->getStmtClassName() << ' ' << (const void*) S << ' ';
+        LangOptions LO; // FIXME.
+        S->printPretty(Out, 0, PrintingPolicy(LO));
+        printLocation(Out, S->getLocStart());
+
+        if (Loc.getAs<PreStmt>())
+          Out << "\\lPreStmt\\l;";
+        else if (Loc.getAs<PostLoad>())
+          Out << "\\lPostLoad\\l;";
+        else if (Loc.getAs<PostStore>())
+          Out << "\\lPostStore\\l";
+        else if (Loc.getAs<PostLValue>())
+          Out << "\\lPostLValue\\l";
+
+#if 0
+          // FIXME: Replace with a general scheme to determine
+          // the name of the check.
+        if (GraphPrintCheckerState->isImplicitNullDeref(N))
+          Out << "\\|Implicit-Null Dereference.\\l";
+        else if (GraphPrintCheckerState->isExplicitNullDeref(N))
+          Out << "\\|Explicit-Null Dereference.\\l";
+        else if (GraphPrintCheckerState->isUndefDeref(N))
+          Out << "\\|Dereference of undefialied value.\\l";
+        else if (GraphPrintCheckerState->isUndefStore(N))
+          Out << "\\|Store to Undefined Loc.";
+        else if (GraphPrintCheckerState->isUndefResult(N))
+          Out << "\\|Result of operation is undefined.";
+        else if (GraphPrintCheckerState->isNoReturnCall(N))
+          Out << "\\|Call to function marked \"noreturn\".";
+        else if (GraphPrintCheckerState->isBadCall(N))
+          Out << "\\|Call to NULL/Undefined.";
+        else if (GraphPrintCheckerState->isUndefArg(N))
+          Out << "\\|Argument in call is undefined";
+#endif
+
+        break;
+      }
+    }
+
+    ProgramStateRef state = N->getState();
+    Out << "\\|StateID: " << (const void*) state.getPtr()
+        << " NodeID: " << (const void*) N << "\\|";
+    state->printDOT(Out);
+
+    Out << "\\l";    
+
+    if (const ProgramPointTag *tag = Loc.getTag()) {
+      Out << "\\|Tag: " << tag->getTagDescription(); 
+      Out << "\\l";
+    }
+    return Out.str();
+  }
+};
+} // end llvm namespace
+#endif
+
+#ifndef NDEBUG
+template <typename ITERATOR>
+ExplodedNode *GetGraphNode(ITERATOR I) { return *I; }
+
+template <> ExplodedNode*
+GetGraphNode<llvm::DenseMap<ExplodedNode*, Expr*>::iterator>
+  (llvm::DenseMap<ExplodedNode*, Expr*>::iterator I) {
+  return I->first;
+}
+#endif
+
+void ExprEngine::ViewGraph(bool trim) {
+#ifndef NDEBUG
+  if (trim) {
+    std::vector<const ExplodedNode*> Src;
+
+    // Flush any outstanding reports to make sure we cover all the nodes.
+    // This does not cause them to get displayed.
+    for (BugReporter::iterator I=BR.begin(), E=BR.end(); I!=E; ++I)
+      const_cast<BugType*>(*I)->FlushReports(BR);
+
+    // Iterate through the reports and get their nodes.
+    for (BugReporter::EQClasses_iterator
+           EI = BR.EQClasses_begin(), EE = BR.EQClasses_end(); EI != EE; ++EI) {
+      ExplodedNode *N = const_cast<ExplodedNode*>(EI->begin()->getErrorNode());
+      if (N) Src.push_back(N);
+    }
+
+    ViewGraph(Src);
+  }
+  else {
+    GraphPrintCheckerState = this;
+    GraphPrintSourceManager = &getContext().getSourceManager();
+
+    llvm::ViewGraph(*G.roots_begin(), "ExprEngine");
+
+    GraphPrintCheckerState = NULL;
+    GraphPrintSourceManager = NULL;
+  }
+#endif
+}
+
+void ExprEngine::ViewGraph(ArrayRef<const ExplodedNode*> Nodes) {
+#ifndef NDEBUG
+  GraphPrintCheckerState = this;
+  GraphPrintSourceManager = &getContext().getSourceManager();
+
+  OwningPtr<ExplodedGraph> TrimmedG(G.trim(Nodes));
+
+  if (!TrimmedG.get())
+    llvm::errs() << "warning: Trimmed ExplodedGraph is empty.\n";
+  else
+    llvm::ViewGraph(*TrimmedG->roots_begin(), "TrimmedExprEngine");
+
+  GraphPrintCheckerState = NULL;
+  GraphPrintSourceManager = NULL;
+#endif
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
new file mode 100644
index 0000000..67aeab6
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -0,0 +1,944 @@
+//=-- ExprEngineC.cpp - ExprEngine support for C expressions ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines ExprEngine's support for C expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExprCXX.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+using llvm::APSInt;
+
+void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
+                                     ExplodedNode *Pred,
+                                     ExplodedNodeSet &Dst) {
+
+  Expr *LHS = B->getLHS()->IgnoreParens();
+  Expr *RHS = B->getRHS()->IgnoreParens();
+  
+  // FIXME: Prechecks eventually go in ::Visit().
+  ExplodedNodeSet CheckedSet;
+  ExplodedNodeSet Tmp2;
+  getCheckerManager().runCheckersForPreStmt(CheckedSet, Pred, B, *this);
+    
+  // With both the LHS and RHS evaluated, process the operation itself.    
+  for (ExplodedNodeSet::iterator it=CheckedSet.begin(), ei=CheckedSet.end();
+         it != ei; ++it) {
+      
+    ProgramStateRef state = (*it)->getState();
+    const LocationContext *LCtx = (*it)->getLocationContext();
+    SVal LeftV = state->getSVal(LHS, LCtx);
+    SVal RightV = state->getSVal(RHS, LCtx);
+      
+    BinaryOperator::Opcode Op = B->getOpcode();
+      
+    if (Op == BO_Assign) {
+      // EXPERIMENTAL: "Conjured" symbols.
+      // FIXME: Handle structs.
+      if (RightV.isUnknown()) {
+        unsigned Count = currBldrCtx->blockCount();
+        RightV = svalBuilder.conjureSymbolVal(0, B->getRHS(), LCtx, Count);
+      }
+      // Simulate the effects of a "store":  bind the value of the RHS
+      // to the L-Value represented by the LHS.
+      SVal ExprVal = B->isGLValue() ? LeftV : RightV;
+      evalStore(Tmp2, B, LHS, *it, state->BindExpr(B, LCtx, ExprVal),
+                LeftV, RightV);
+      continue;
+    }
+      
+    if (!B->isAssignmentOp()) {
+      StmtNodeBuilder Bldr(*it, Tmp2, *currBldrCtx);
+
+      if (B->isAdditiveOp()) {
+        // If one of the operands is a location, conjure a symbol for the other
+        // one (offset) if it's unknown so that memory arithmetic always
+        // results in an ElementRegion.
+        // TODO: This can be removed after we enable history tracking with
+        // SymSymExpr.
+        unsigned Count = currBldrCtx->blockCount();
+        if (LeftV.getAs<Loc>() &&
+            RHS->getType()->isIntegralOrEnumerationType() &&
+            RightV.isUnknown()) {
+          RightV = svalBuilder.conjureSymbolVal(RHS, LCtx, RHS->getType(),
+                                                Count);
+        }
+        if (RightV.getAs<Loc>() &&
+            LHS->getType()->isIntegralOrEnumerationType() &&
+            LeftV.isUnknown()) {
+          LeftV = svalBuilder.conjureSymbolVal(LHS, LCtx, LHS->getType(),
+                                               Count);
+        }
+      }
+
+      // Process non-assignments except commas or short-circuited
+      // logical expressions (LAnd and LOr).
+      SVal Result = evalBinOp(state, Op, LeftV, RightV, B->getType());      
+      if (Result.isUnknown()) {
+        Bldr.generateNode(B, *it, state);
+        continue;
+      }        
+
+      state = state->BindExpr(B, LCtx, Result);      
+      Bldr.generateNode(B, *it, state);
+      continue;
+    }
+      
+    assert (B->isCompoundAssignmentOp());
+    
+    switch (Op) {
+      default:
+        llvm_unreachable("Invalid opcode for compound assignment.");
+      case BO_MulAssign: Op = BO_Mul; break;
+      case BO_DivAssign: Op = BO_Div; break;
+      case BO_RemAssign: Op = BO_Rem; break;
+      case BO_AddAssign: Op = BO_Add; break;
+      case BO_SubAssign: Op = BO_Sub; break;
+      case BO_ShlAssign: Op = BO_Shl; break;
+      case BO_ShrAssign: Op = BO_Shr; break;
+      case BO_AndAssign: Op = BO_And; break;
+      case BO_XorAssign: Op = BO_Xor; break;
+      case BO_OrAssign:  Op = BO_Or;  break;
+    }
+      
+    // Perform a load (the LHS).  This performs the checks for
+    // null dereferences, and so on.
+    ExplodedNodeSet Tmp;
+    SVal location = LeftV;
+    evalLoad(Tmp, B, LHS, *it, state, location);
+    
+    for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E;
+         ++I) {
+
+      state = (*I)->getState();
+      const LocationContext *LCtx = (*I)->getLocationContext();
+      SVal V = state->getSVal(LHS, LCtx);
+      
+      // Get the computation type.
+      QualType CTy =
+        cast<CompoundAssignOperator>(B)->getComputationResultType();
+      CTy = getContext().getCanonicalType(CTy);
+      
+      QualType CLHSTy =
+        cast<CompoundAssignOperator>(B)->getComputationLHSType();
+      CLHSTy = getContext().getCanonicalType(CLHSTy);
+      
+      QualType LTy = getContext().getCanonicalType(LHS->getType());
+      
+      // Promote LHS.
+      V = svalBuilder.evalCast(V, CLHSTy, LTy);
+      
+      // Compute the result of the operation.
+      SVal Result = svalBuilder.evalCast(evalBinOp(state, Op, V, RightV, CTy),
+                                         B->getType(), CTy);
+      
+      // EXPERIMENTAL: "Conjured" symbols.
+      // FIXME: Handle structs.
+      
+      SVal LHSVal;
+      
+      if (Result.isUnknown()) {
+        // The symbolic value is actually for the type of the left-hand side
+        // expression, not the computation type, as this is the value the
+        // LValue on the LHS will bind to.
+        LHSVal = svalBuilder.conjureSymbolVal(0, B->getRHS(), LCtx, LTy,
+                                              currBldrCtx->blockCount());
+        // However, we need to convert the symbol to the computation type.
+        Result = svalBuilder.evalCast(LHSVal, CTy, LTy);
+      }
+      else {
+        // The left-hand side may bind to a different value than the
+        // computation type.
+        LHSVal = svalBuilder.evalCast(Result, LTy, CTy);
+      }
+      
+      // In C++, assignment and compound assignment operators return an 
+      // lvalue.
+      if (B->isGLValue())
+        state = state->BindExpr(B, LCtx, location);
+      else
+        state = state->BindExpr(B, LCtx, Result);
+      
+      evalStore(Tmp2, B, LHS, *I, state, location, LHSVal);
+    }
+  }
+  
+  // FIXME: postvisits eventually go in ::Visit()
+  getCheckerManager().runCheckersForPostStmt(Dst, Tmp2, B, *this);
+}
+
+void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
+                                ExplodedNodeSet &Dst) {
+  
+  CanQualType T = getContext().getCanonicalType(BE->getType());
+
+  // Get the value of the block itself.
+  SVal V = svalBuilder.getBlockPointer(BE->getBlockDecl(), T,
+                                       Pred->getLocationContext());
+  
+  ProgramStateRef State = Pred->getState();
+  
+  // If we created a new MemRegion for the block, we should explicitly bind
+  // the captured variables.
+  if (const BlockDataRegion *BDR =
+      dyn_cast_or_null<BlockDataRegion>(V.getAsRegion())) {
+    
+    BlockDataRegion::referenced_vars_iterator I = BDR->referenced_vars_begin(),
+                                              E = BDR->referenced_vars_end();
+    
+    for (; I != E; ++I) {
+      const MemRegion *capturedR = I.getCapturedRegion();
+      const MemRegion *originalR = I.getOriginalRegion();
+      if (capturedR != originalR) {
+        SVal originalV = State->getSVal(loc::MemRegionVal(originalR));
+        State = State->bindLoc(loc::MemRegionVal(capturedR), originalV);
+      }
+    }
+  }
+  
+  ExplodedNodeSet Tmp;
+  StmtNodeBuilder Bldr(Pred, Tmp, *currBldrCtx);
+  Bldr.generateNode(BE, Pred,
+                    State->BindExpr(BE, Pred->getLocationContext(), V),
+                    0, ProgramPoint::PostLValueKind);
+  
+  // FIXME: Move all post/pre visits to ::Visit().
+  getCheckerManager().runCheckersForPostStmt(Dst, Tmp, BE, *this);
+}
+
+void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex, 
+                           ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+  
+  ExplodedNodeSet dstPreStmt;
+  getCheckerManager().runCheckersForPreStmt(dstPreStmt, Pred, CastE, *this);
+  
+  if (CastE->getCastKind() == CK_LValueToRValue) {
+    for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
+         I!=E; ++I) {
+      ExplodedNode *subExprNode = *I;
+      ProgramStateRef state = subExprNode->getState();
+      const LocationContext *LCtx = subExprNode->getLocationContext();
+      evalLoad(Dst, CastE, CastE, subExprNode, state, state->getSVal(Ex, LCtx));
+    }
+    return;
+  }
+  
+  // All other casts.  
+  QualType T = CastE->getType();
+  QualType ExTy = Ex->getType();
+  
+  if (const ExplicitCastExpr *ExCast=dyn_cast_or_null<ExplicitCastExpr>(CastE))
+    T = ExCast->getTypeAsWritten();
+  
+  StmtNodeBuilder Bldr(dstPreStmt, Dst, *currBldrCtx);
+  for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
+       I != E; ++I) {
+    
+    Pred = *I;
+    ProgramStateRef state = Pred->getState();
+    const LocationContext *LCtx = Pred->getLocationContext();
+
+    switch (CastE->getCastKind()) {
+      case CK_LValueToRValue:
+        llvm_unreachable("LValueToRValue casts handled earlier.");
+      case CK_ToVoid:
+        continue;
+        // The analyzer doesn't do anything special with these casts,
+        // since it understands retain/release semantics already.
+      case CK_ARCProduceObject:
+      case CK_ARCConsumeObject:
+      case CK_ARCReclaimReturnedObject:
+      case CK_ARCExtendBlockObject: // Fall-through.
+      case CK_CopyAndAutoreleaseBlockObject:
+        // The analyzer can ignore atomic casts for now, although some future
+        // checkers may want to make certain that you're not modifying the same
+        // value through atomic and nonatomic pointers.
+      case CK_AtomicToNonAtomic:
+      case CK_NonAtomicToAtomic:
+        // True no-ops.
+      case CK_NoOp:
+      case CK_ConstructorConversion:
+      case CK_UserDefinedConversion:
+      case CK_FunctionToPointerDecay:
+      case CK_BuiltinFnToFnPtr: {
+        // Copy the SVal of Ex to CastE.
+        ProgramStateRef state = Pred->getState();
+        const LocationContext *LCtx = Pred->getLocationContext();
+        SVal V = state->getSVal(Ex, LCtx);
+        state = state->BindExpr(CastE, LCtx, V);
+        Bldr.generateNode(CastE, Pred, state);
+        continue;
+      }
+      case CK_MemberPointerToBoolean:
+        // FIXME: For now, member pointers are represented by void *.
+        // FALLTHROUGH
+      case CK_Dependent:
+      case CK_ArrayToPointerDecay:
+      case CK_BitCast:
+      case CK_IntegralCast:
+      case CK_NullToPointer:
+      case CK_IntegralToPointer:
+      case CK_PointerToIntegral:
+      case CK_PointerToBoolean:
+      case CK_IntegralToBoolean:
+      case CK_IntegralToFloating:
+      case CK_FloatingToIntegral:
+      case CK_FloatingToBoolean:
+      case CK_FloatingCast:
+      case CK_FloatingRealToComplex:
+      case CK_FloatingComplexToReal:
+      case CK_FloatingComplexToBoolean:
+      case CK_FloatingComplexCast:
+      case CK_FloatingComplexToIntegralComplex:
+      case CK_IntegralRealToComplex:
+      case CK_IntegralComplexToReal:
+      case CK_IntegralComplexToBoolean:
+      case CK_IntegralComplexCast:
+      case CK_IntegralComplexToFloatingComplex:
+      case CK_CPointerToObjCPointerCast:
+      case CK_BlockPointerToObjCPointerCast:
+      case CK_AnyPointerToBlockPointerCast:  
+      case CK_ObjCObjectLValueCast: 
+      case CK_ZeroToOCLEvent: {
+        // Delegate to SValBuilder to process.
+        SVal V = state->getSVal(Ex, LCtx);
+        V = svalBuilder.evalCast(V, T, ExTy);
+        state = state->BindExpr(CastE, LCtx, V);
+        Bldr.generateNode(CastE, Pred, state);
+        continue;
+      }
+      case CK_DerivedToBase:
+      case CK_UncheckedDerivedToBase: {
+        // For DerivedToBase cast, delegate to the store manager.
+        SVal val = state->getSVal(Ex, LCtx);
+        val = getStoreManager().evalDerivedToBase(val, CastE);
+        state = state->BindExpr(CastE, LCtx, val);
+        Bldr.generateNode(CastE, Pred, state);
+        continue;
+      }
+      // Handle C++ dyn_cast.
+      case CK_Dynamic: {
+        SVal val = state->getSVal(Ex, LCtx);
+
+        // Compute the type of the result.
+        QualType resultType = CastE->getType();
+        if (CastE->isGLValue())
+          resultType = getContext().getPointerType(resultType);
+
+        bool Failed = false;
+
+        // Check if the value being cast evaluates to 0.
+        if (val.isZeroConstant())
+          Failed = true;
+        // Else, evaluate the cast.
+        else
+          val = getStoreManager().evalDynamicCast(val, T, Failed);
+
+        if (Failed) {
+          if (T->isReferenceType()) {
+            // A bad_cast exception is thrown if the input value is a reference.
+            // Currently, we model this by generating a sink.
+            Bldr.generateSink(CastE, Pred, state);
+            continue;
+          } else {
+            // If the cast fails on a pointer, bind to 0.
+            state = state->BindExpr(CastE, LCtx, svalBuilder.makeNull());
+          }
+        } else {
+          // If we don't know if the cast succeeded, conjure a new symbol.
+          if (val.isUnknown()) {
+            DefinedOrUnknownSVal NewSym =
+              svalBuilder.conjureSymbolVal(0, CastE, LCtx, resultType,
+                                           currBldrCtx->blockCount());
+            state = state->BindExpr(CastE, LCtx, NewSym);
+          } else 
+            // Else, bind to the derived region value.
+            state = state->BindExpr(CastE, LCtx, val);
+        }
+        Bldr.generateNode(CastE, Pred, state);
+        continue;
+      }
+      case CK_NullToMemberPointer: {
+        // FIXME: For now, member pointers are represented by void *.
+        SVal V = svalBuilder.makeIntValWithPtrWidth(0, true);
+        state = state->BindExpr(CastE, LCtx, V);
+        Bldr.generateNode(CastE, Pred, state);
+        continue;
+      }
+      // Various C++ casts that are not handled yet.
+      case CK_ToUnion:
+      case CK_BaseToDerived:
+      case CK_BaseToDerivedMemberPointer:
+      case CK_DerivedToBaseMemberPointer:
+      case CK_ReinterpretMemberPointer:
+      case CK_VectorSplat:
+      case CK_LValueBitCast: {
+        // Recover some path-sensitivity by conjuring a new value.
+        QualType resultType = CastE->getType();
+        if (CastE->isGLValue())
+          resultType = getContext().getPointerType(resultType);
+        SVal result = svalBuilder.conjureSymbolVal(0, CastE, LCtx,
+                                                   resultType,
+                                                   currBldrCtx->blockCount());
+        state = state->BindExpr(CastE, LCtx, result);
+        Bldr.generateNode(CastE, Pred, state);
+        continue;
+      }
+    }
+  }
+}
+
+void ExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr *CL,
+                                          ExplodedNode *Pred,
+                                          ExplodedNodeSet &Dst) {
+  StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
+
+  ProgramStateRef State = Pred->getState();
+  const LocationContext *LCtx = Pred->getLocationContext();
+
+  const Expr *Init = CL->getInitializer();
+  SVal V = State->getSVal(CL->getInitializer(), LCtx);
+  
+  if (isa<CXXConstructExpr>(Init)) {
+    // No work needed. Just pass the value up to this expression.
+  } else {
+    assert(isa<InitListExpr>(Init));
+    Loc CLLoc = State->getLValue(CL, LCtx);
+    State = State->bindLoc(CLLoc, V);
+
+    // Compound literal expressions are a GNU extension in C++.
+    // Unlike in C, where CLs are lvalues, in C++ CLs are prvalues,
+    // and like temporary objects created by the functional notation T()
+    // CLs are destroyed at the end of the containing full-expression.
+    // HOWEVER, an rvalue of array type is not something the analyzer can
+    // reason about, since we expect all regions to be wrapped in Locs.
+    // So we treat array CLs as lvalues as well, knowing that they will decay
+    // to pointers as soon as they are used.
+    if (CL->isGLValue() || CL->getType()->isArrayType())
+      V = CLLoc;
+  }
+
+  B.generateNode(CL, Pred, State->BindExpr(CL, LCtx, V));
+}
+
+void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
+                               ExplodedNodeSet &Dst) {
+  // Assumption: The CFG has one DeclStmt per Decl.
+  const VarDecl *VD = dyn_cast_or_null<VarDecl>(*DS->decl_begin());
+
+  if (!VD) {
+    //TODO:AZ: remove explicit insertion after refactoring is done.
+    Dst.insert(Pred);
+    return;
+  }
+  
+  // FIXME: all pre/post visits should eventually be handled by ::Visit().
+  ExplodedNodeSet dstPreVisit;
+  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, DS, *this);
+  
+  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);
+  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
+       I!=E; ++I) {
+    ExplodedNode *N = *I;
+    ProgramStateRef state = N->getState();
+    const LocationContext *LC = N->getLocationContext();
+
+    // Decls without InitExpr are not initialized explicitly.
+    if (const Expr *InitEx = VD->getInit()) {
+
+      // Note in the state that the initialization has occurred.
+      ExplodedNode *UpdatedN = N;
+      SVal InitVal = state->getSVal(InitEx, LC);
+
+      if (isa<CXXConstructExpr>(InitEx->IgnoreImplicit())) {
+        // We constructed the object directly in the variable.
+        // No need to bind anything.
+        B.generateNode(DS, UpdatedN, state);
+      } else {
+        // We bound the temp obj region to the CXXConstructExpr. Now recover
+        // the lazy compound value when the variable is not a reference.
+        if (AMgr.getLangOpts().CPlusPlus && VD->getType()->isRecordType() &&
+            !VD->getType()->isReferenceType()) {
+          if (Optional<loc::MemRegionVal> M =
+                  InitVal.getAs<loc::MemRegionVal>()) {
+            InitVal = state->getSVal(M->getRegion());
+            assert(InitVal.getAs<nonloc::LazyCompoundVal>());
+          }
+        }
+        
+        // Recover some path-sensitivity if a scalar value evaluated to
+        // UnknownVal.
+        if (InitVal.isUnknown()) {
+          QualType Ty = InitEx->getType();
+          if (InitEx->isGLValue()) {
+            Ty = getContext().getPointerType(Ty);
+          }
+
+          InitVal = svalBuilder.conjureSymbolVal(0, InitEx, LC, Ty,
+                                                 currBldrCtx->blockCount());
+        }
+
+
+        B.takeNodes(UpdatedN);
+        ExplodedNodeSet Dst2;
+        evalBind(Dst2, DS, UpdatedN, state->getLValue(VD, LC), InitVal, true);
+        B.addNodes(Dst2);
+      }
+    }
+    else {
+      B.generateNode(DS, N, state);
+    }
+  }
+}
+
+void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred,
+                                  ExplodedNodeSet &Dst) {
+  assert(B->getOpcode() == BO_LAnd ||
+         B->getOpcode() == BO_LOr);
+
+  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+  ProgramStateRef state = Pred->getState();
+
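+  // Walk back over pre-statement and purge-dead-symbols nodes to the
+  // BlockEntrance of this block; its unique predecessor carries the BlockEdge
+  // that records which branch of the logical operator was taken.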
+  ExplodedNode *N = Pred;
+  while (!N->getLocation().getAs<BlockEntrance>()) {
+    ProgramPoint P = N->getLocation();
+    assert(P.getAs<PreStmt>()|| P.getAs<PreStmtPurgeDeadSymbols>());
+    (void) P;
+    assert(N->pred_size() == 1);
+    N = *N->pred_begin();
+  }
+  assert(N->pred_size() == 1);
+  N = *N->pred_begin();
+  BlockEdge BE = N->getLocation().castAs<BlockEdge>();
+  SVal X;
+
+  // Determine the value of the expression by introspecting how we
+  // got this location in the CFG.  This requires looking at the previous
+  // block we were in and what kind of control-flow transfer was involved.
+  const CFGBlock *SrcBlock = BE.getSrc();
+  // The only terminator (if there is one) that makes sense is a logical op.
+  CFGTerminator T = SrcBlock->getTerminator();
+  if (const BinaryOperator *Term = cast_or_null<BinaryOperator>(T.getStmt())) {
+    (void) Term;
+    assert(Term->isLogicalOp());
+    assert(SrcBlock->succ_size() == 2);
+    // Did we take the true or false branch?
+    unsigned constant = (*SrcBlock->succ_begin() == BE.getDst()) ? 1 : 0;
+    X = svalBuilder.makeIntVal(constant, B->getType());
+  }
+  else {
+    // If there is no terminator, by construction the last statement
+    // in SrcBlock is the value of the enclosing expression.
+    // However, we still need to constrain that value to be 0 or 1.
+    assert(!SrcBlock->empty());
+    CFGStmt Elem = SrcBlock->rbegin()->castAs<CFGStmt>();
+    const Expr *RHS = cast<Expr>(Elem.getStmt());
+    SVal RHSVal = N->getState()->getSVal(RHS, Pred->getLocationContext());
+
+    if (RHSVal.isUndef()) {
+      X = RHSVal;
+    } else {
+      DefinedOrUnknownSVal DefinedRHS = RHSVal.castAs<DefinedOrUnknownSVal>();
+      ProgramStateRef StTrue, StFalse;
+      llvm::tie(StTrue, StFalse) = N->getState()->assume(DefinedRHS);
+      if (StTrue) {
+        if (StFalse) {
+          // We can't constrain the value to 0 or 1.
+          // The best we can do is a cast.
+          X = getSValBuilder().evalCast(RHSVal, B->getType(), RHS->getType());
+        } else {
+          // The value is known to be true.
+          X = getSValBuilder().makeIntVal(1, B->getType());
+        }
+      } else {
+        // The value is known to be false.
+        assert(StFalse && "Infeasible path!");
+        X = getSValBuilder().makeIntVal(0, B->getType());
+      }
+    }
+  }
+  Bldr.generateNode(B, Pred, state->BindExpr(B, Pred->getLocationContext(), X));
+}
+
+void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
+                                   ExplodedNode *Pred,
+                                   ExplodedNodeSet &Dst) {
+  StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
+
+  ProgramStateRef state = Pred->getState();
+  const LocationContext *LCtx = Pred->getLocationContext();
+  QualType T = getContext().getCanonicalType(IE->getType());
+  unsigned NumInitElements = IE->getNumInits();
+  
+  if (T->isArrayType() || T->isRecordType() || T->isVectorType() ||
+      T->isAnyComplexType()) {
+    llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();
+    
+    // Handle base case where the initializer has no elements.
+    // e.g.: static int* myArray[] = {};
+    if (NumInitElements == 0) {
+      SVal V = svalBuilder.makeCompoundVal(T, vals);
+      B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
+      return;
+    }
+    
+    for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
+         ei = IE->rend(); it != ei; ++it) {
+      SVal V = state->getSVal(cast<Expr>(*it), LCtx);
+      if (dyn_cast_or_null<CXXTempObjectRegion>(V.getAsRegion()))
+        V = UnknownVal();
+      vals = getBasicVals().consVals(V, vals);
+    }
+    
+    B.generateNode(IE, Pred,
+                   state->BindExpr(IE, LCtx,
+                                   svalBuilder.makeCompoundVal(T, vals)));
+    return;
+  }
+
+  // Handle scalars: int{5} and int{}.
+  assert(NumInitElements <= 1);
+
+  SVal V;
+  if (NumInitElements == 0)
+    V = getSValBuilder().makeZeroVal(T);
+  else
+    V = state->getSVal(IE->getInit(0), LCtx);
+
+  B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
+}
+
+void ExprEngine::VisitGuardedExpr(const Expr *Ex,
+                                  const Expr *L, 
+                                  const Expr *R,
+                                  ExplodedNode *Pred,
+                                  ExplodedNodeSet &Dst) {
+  assert(L && R);
+
+  StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
+  ProgramStateRef state = Pred->getState();
+  const LocationContext *LCtx = Pred->getLocationContext();
+  const CFGBlock *SrcBlock = 0;
+
+  // Find the predecessor block.
+  ProgramStateRef SrcState = state;
+  for (const ExplodedNode *N = Pred ; N ; N = *N->pred_begin()) {
+    ProgramPoint PP = N->getLocation();
+    if (PP.getAs<PreStmtPurgeDeadSymbols>() || PP.getAs<BlockEntrance>()) {
+      assert(N->pred_size() == 1);
+      continue;
+    }
+    SrcBlock = PP.castAs<BlockEdge>().getSrc();
+    SrcState = N->getState();
+    break;
+  }
+
+  assert(SrcBlock && "missing function entry");
+
+  // Find the last expression in the predecessor block.  That is the
+  // expression that is used for the value of the ternary expression.
+  bool hasValue = false;
+  SVal V;
+
+  for (CFGBlock::const_reverse_iterator I = SrcBlock->rbegin(),
+                                        E = SrcBlock->rend(); I != E; ++I) {
+    CFGElement CE = *I;
+    if (Optional<CFGStmt> CS = CE.getAs<CFGStmt>()) {
+      const Expr *ValEx = cast<Expr>(CS->getStmt());
+      ValEx = ValEx->IgnoreParens();
+
+      // For the GNU '?:' extension, the left-hand side will be an
+      // OpaqueValueExpr, so get the underlying expression.
+      if (const OpaqueValueExpr *OpaqueEx = dyn_cast<OpaqueValueExpr>(L))
+        L = OpaqueEx->getSourceExpr();
+
+      // If the last expression in the predecessor block matches the true or
+      // false subexpression, get its value.
+      if (ValEx == L->IgnoreParens() || ValEx == R->IgnoreParens()) {
+        hasValue = true;
+        V = SrcState->getSVal(ValEx, LCtx);
+      }
+      break;
+    }
+  }
+
+  if (!hasValue)
+    V = svalBuilder.conjureSymbolVal(0, Ex, LCtx, currBldrCtx->blockCount());
+
+  // Generate a new node with the binding from the appropriate path.
+  B.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V, true));
+}
+
+void ExprEngine::
+VisitOffsetOfExpr(const OffsetOfExpr *OOE, 
+                  ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+  StmtNodeBuilder B(Pred, Dst, *currBldrCtx);
+  APSInt IV;
+  if (OOE->EvaluateAsInt(IV, getContext())) {
+    assert(IV.getBitWidth() == getContext().getTypeSize(OOE->getType()));
+    assert(OOE->getType()->isBuiltinType());
+    assert(OOE->getType()->getAs<BuiltinType>()->isInteger());
+    assert(IV.isSigned() == OOE->getType()->isSignedIntegerType());
+    SVal X = svalBuilder.makeIntVal(IV);
+    B.generateNode(OOE, Pred,
+                   Pred->getState()->BindExpr(OOE, Pred->getLocationContext(),
+                                              X));
+  }
+  // FIXME: Handle the case where __builtin_offsetof is not a constant.
+}
+
+
+void ExprEngine::
+VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
+                              ExplodedNode *Pred,
+                              ExplodedNodeSet &Dst) {
+  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+
+  QualType T = Ex->getTypeOfArgument();
+  
+  if (Ex->getKind() == UETT_SizeOf) {
+    if (!T->isIncompleteType() && !T->isConstantSizeType()) {
+      assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");
+      
+      // FIXME: Add support for VLA type arguments and VLA expressions.
+      // When that happens, we should probably refactor VLASizeChecker's code.
+      return;
+    }
+    else if (T->getAs<ObjCObjectType>()) {
+      // Some code tries to take the sizeof an ObjCObjectType, assuming that
+      // the compiler has laid out its representation.  Just report Unknown
+      // for these.
+      return;
+    }
+  }
+  
+  APSInt Value = Ex->EvaluateKnownConstInt(getContext());
+  CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());
+  
+  ProgramStateRef state = Pred->getState();
+  state = state->BindExpr(Ex, Pred->getLocationContext(),
+                          svalBuilder.makeIntVal(amt.getQuantity(),
+                                                     Ex->getType()));
+  Bldr.generateNode(Ex, Pred, state);
+}
+
+void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, 
+                                    ExplodedNode *Pred,
+                                    ExplodedNodeSet &Dst) {
+  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+  switch (U->getOpcode()) {
+    default: {
+      Bldr.takeNodes(Pred);
+      ExplodedNodeSet Tmp;
+      VisitIncrementDecrementOperator(U, Pred, Tmp);
+      Bldr.addNodes(Tmp);
+    }
+      break;
+    case UO_Real: {
+      const Expr *Ex = U->getSubExpr()->IgnoreParens();
+        
+      // FIXME: We don't have complex SValues yet.
+      if (Ex->getType()->isAnyComplexType()) {
+        // Just report "Unknown."
+        break;
+      }
+        
+      // For all other types, UO_Real is an identity operation.
+      assert (U->getType() == Ex->getType());
+      ProgramStateRef state = Pred->getState();
+      const LocationContext *LCtx = Pred->getLocationContext();
+      Bldr.generateNode(U, Pred, state->BindExpr(U, LCtx,
+                                                 state->getSVal(Ex, LCtx)));
+      break;
+    }
+      
+    case UO_Imag: {      
+      const Expr *Ex = U->getSubExpr()->IgnoreParens();
+      // FIXME: We don't have complex SValues yet.
+      if (Ex->getType()->isAnyComplexType()) {
+        // Just report "Unknown."
+        break;
+      }
+      // For all other types, UO_Imag returns 0.
+      ProgramStateRef state = Pred->getState();
+      const LocationContext *LCtx = Pred->getLocationContext();
+      SVal X = svalBuilder.makeZeroVal(Ex->getType());
+      Bldr.generateNode(U, Pred, state->BindExpr(U, LCtx, X));
+      break;
+    }
+      
+    case UO_Plus:
+      assert(!U->isGLValue());
+      // FALL-THROUGH.
+    case UO_Deref:
+    case UO_AddrOf:
+    case UO_Extension: {
+      // FIXME: We can probably just have some magic in Environment::getSVal()
+      // that propagates values, instead of creating a new node here.
+      //
+      // Unary "+" is a no-op, similar to a parentheses.  We still have places
+      // where it may be a block-level expression, so we need to
+      // generate an extra node that just propagates the value of the
+      // subexpression.      
+      const Expr *Ex = U->getSubExpr()->IgnoreParens();
+      ProgramStateRef state = Pred->getState();
+      const LocationContext *LCtx = Pred->getLocationContext();
+      Bldr.generateNode(U, Pred, state->BindExpr(U, LCtx,
+                                                 state->getSVal(Ex, LCtx)));
+      break;
+    }
+      
+    case UO_LNot:
+    case UO_Minus:
+    case UO_Not: {
+      assert (!U->isGLValue());
+      const Expr *Ex = U->getSubExpr()->IgnoreParens();
+      ProgramStateRef state = Pred->getState();
+      const LocationContext *LCtx = Pred->getLocationContext();
+        
+      // Get the value of the subexpression.
+      SVal V = state->getSVal(Ex, LCtx);
+        
+      if (V.isUnknownOrUndef()) {
+        Bldr.generateNode(U, Pred, state->BindExpr(U, LCtx, V));
+        break;
+      }
+        
+      switch (U->getOpcode()) {
+        default:
+          llvm_unreachable("Invalid Opcode.");
+        case UO_Not:
+          // FIXME: Do we need to handle promotions?
+          state = state->BindExpr(U, LCtx, evalComplement(V.castAs<NonLoc>()));
+          break;
+        case UO_Minus:
+          // FIXME: Do we need to handle promotions?
+          state = state->BindExpr(U, LCtx, evalMinus(V.castAs<NonLoc>()));
+          break;
+        case UO_LNot:
+          // C99 6.5.3.3: "The expression !E is equivalent to (0==E)."
+          //
+          //  Note: technically we do "E == 0", but this is the same in the
+          //    transfer functions as "0 == E".
+          SVal Result;          
+          if (Optional<Loc> LV = V.getAs<Loc>()) {
+            Loc X = svalBuilder.makeNull();
+            Result = evalBinOp(state, BO_EQ, *LV, X, U->getType());
+          }
+          else if (Ex->getType()->isFloatingType()) {
+            // FIXME: handle floating point types.
+            Result = UnknownVal();
+          } else {
+            nonloc::ConcreteInt X(getBasicVals().getValue(0, Ex->getType()));
+            Result = evalBinOp(state, BO_EQ, V.castAs<NonLoc>(), X,
+                               U->getType());
+          }
+          
+          state = state->BindExpr(U, LCtx, Result);          
+          break;
+      }
+      Bldr.generateNode(U, Pred, state);
+      break;
+    }
+  }
+
+}
+
+void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
+                                                 ExplodedNode *Pred,
+                                                 ExplodedNodeSet &Dst) {
+  // Handle ++ and -- (both pre- and post-increment).
+  assert (U->isIncrementDecrementOp());
+  const Expr *Ex = U->getSubExpr()->IgnoreParens();
+  
+  const LocationContext *LCtx = Pred->getLocationContext();
+  ProgramStateRef state = Pred->getState();
+  SVal loc = state->getSVal(Ex, LCtx);
+  
+  // Perform a load.
+  ExplodedNodeSet Tmp;
+  evalLoad(Tmp, U, Ex, Pred, state, loc);
+  
+  ExplodedNodeSet Dst2;
+  StmtNodeBuilder Bldr(Tmp, Dst2, *currBldrCtx);
+  for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end();I!=E;++I) {
+    
+    state = (*I)->getState();
+    assert(LCtx == (*I)->getLocationContext());
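+    // After evalLoad, the value bound to the subexpression is the loaded
+    // rvalue.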
+    SVal V2_untested = state->getSVal(Ex, LCtx);
+    
+    // Propagate unknown and undefined values.
+    if (V2_untested.isUnknownOrUndef()) {
+      Bldr.generateNode(U, *I, state->BindExpr(U, LCtx, V2_untested));
+      continue;
+    }
+    DefinedSVal V2 = V2_untested.castAs<DefinedSVal>();
+    
+    // Handle all other values.
+    BinaryOperator::Opcode Op = U->isIncrementOp() ? BO_Add : BO_Sub;
+    
+    // If the UnaryOperator has non-location type, use its type to create the
+    // constant value. If the UnaryOperator has location type, create the
+    // constant with int type and pointer width.
+    SVal RHS;
+    
+    if (U->getType()->isAnyPointerType())
+      RHS = svalBuilder.makeArrayIndex(1);
+    else if (U->getType()->isIntegralOrEnumerationType())
+      RHS = svalBuilder.makeIntVal(1, U->getType());
+    else
+      RHS = UnknownVal();
+    
+    SVal Result = evalBinOp(state, Op, V2, RHS, U->getType());
+    
+    // Conjure a new symbol if necessary to recover precision.
+    if (Result.isUnknown()){
+      DefinedOrUnknownSVal SymVal =
+        svalBuilder.conjureSymbolVal(0, Ex, LCtx, currBldrCtx->blockCount());
+      Result = SymVal;
+      
+      // If the value is a location, ++/-- should always preserve
+      // non-nullness.  Check if the original value was non-null, and if so
+      // propagate that constraint.
+      if (Loc::isLocType(U->getType())) {
+        DefinedOrUnknownSVal Constraint =
+        svalBuilder.evalEQ(state, V2,svalBuilder.makeZeroVal(U->getType()));
+        
+        if (!state->assume(Constraint, true)) {
+          // It isn't feasible for the original value to be null.
+          // Propagate this constraint.
+          Constraint = svalBuilder.evalEQ(state, SymVal,
+                                       svalBuilder.makeZeroVal(U->getType()));
+          
+          
+          state = state->assume(Constraint, false);
+          assert(state);
+        }
+      }
+    }
+    
+    // Since the lvalue-to-rvalue conversion is explicit in the AST,
+    // we bind an lvalue if the operator is prefix and the expression is an
+    // lvalue (in C++).
+    if (U->isGLValue())
+      state = state->BindExpr(U, LCtx, loc);
+    else
+      state = state->BindExpr(U, LCtx, U->isPostfix() ? V2 : Result);
+    
+    // Perform the store.
+    Bldr.takeNodes(*I);
+    ExplodedNodeSet Dst3;
+    evalStore(Dst3, U, U, *I, state, loc, Result);
+    Bldr.addNodes(Dst3);
+  }
+  Dst.insert(Dst2);
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
new file mode 100644
index 0000000..ed90dc5
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -0,0 +1,433 @@
+//===- ExprEngineCXX.cpp - ExprEngine support for C++ -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the C++ expression evaluation engine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+
+using namespace clang;
+using namespace ento;
+
+void ExprEngine::CreateCXXTemporaryObject(const MaterializeTemporaryExpr *ME,
+                                          ExplodedNode *Pred,
+                                          ExplodedNodeSet &Dst) {
+  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+  const Expr *tempExpr = ME->GetTemporaryExpr()->IgnoreParens();
+  ProgramStateRef state = Pred->getState();
+  const LocationContext *LCtx = Pred->getLocationContext();
+
+  SVal V = state->getSVal(tempExpr, LCtx);
+
+  // If the value is already a CXXTempObjectRegion, it is fine as it is.
+  // Otherwise, create a new CXXTempObjectRegion, and copy the value into it.
+  // This is an optimization for when an rvalue is constructed and then
+  // immediately materialized.
+  const MemRegion *MR = V.getAsRegion();
+  if (const CXXTempObjectRegion *TR =
+        dyn_cast_or_null<CXXTempObjectRegion>(MR)) {
+    if (getContext().hasSameUnqualifiedType(TR->getValueType(), ME->getType()))
+      state = state->BindExpr(ME, LCtx, V);
+  }
+
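+  // If we did not bind to an existing temporary region above, materialize a
+  // fresh temporary region for the value now.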
+  if (state == Pred->getState())
+    state = createTemporaryRegionIfNeeded(state, LCtx, tempExpr, ME);
+  Bldr.generateNode(ME, Pred, state);
+}
+
+// FIXME: This is the sort of code that should eventually live in a Core
+// checker rather than as a special case in ExprEngine.
+void ExprEngine::performTrivialCopy(NodeBuilder &Bldr, ExplodedNode *Pred,
+                                    const CallEvent &Call) {
+  SVal ThisVal;
+  bool AlwaysReturnsLValue;
+  if (const CXXConstructorCall *Ctor = dyn_cast<CXXConstructorCall>(&Call)) {
+    assert(Ctor->getDecl()->isTrivial());
+    assert(Ctor->getDecl()->isCopyOrMoveConstructor());
+    ThisVal = Ctor->getCXXThisVal();
+    AlwaysReturnsLValue = false;
+  } else {
+    assert(cast<CXXMethodDecl>(Call.getDecl())->isTrivial());
+    assert(cast<CXXMethodDecl>(Call.getDecl())->getOverloadedOperator() ==
+           OO_Equal);
+    ThisVal = cast<CXXInstanceCall>(Call).getCXXThisVal();
+    AlwaysReturnsLValue = true;
+  }
+
+  const LocationContext *LCtx = Pred->getLocationContext();
+
+  ExplodedNodeSet Dst;
+  Bldr.takeNodes(Pred);
+
+  SVal V = Call.getArgSVal(0);
+
+  // If the value being copied is not unknown, load from its location to get
+  // an aggregate rvalue.
+  if (Optional<Loc> L = V.getAs<Loc>())
+    V = Pred->getState()->getSVal(*L);
+  else
+    assert(V.isUnknown());
+
+  const Expr *CallExpr = Call.getOriginExpr();
+  evalBind(Dst, CallExpr, Pred, ThisVal, V, true);
+
+  PostStmt PS(CallExpr, LCtx);
+  for (ExplodedNodeSet::iterator I = Dst.begin(), E = Dst.end();
+       I != E; ++I) {
+    ProgramStateRef State = (*I)->getState();
+    if (AlwaysReturnsLValue)
+      State = State->BindExpr(CallExpr, LCtx, ThisVal);
+    else
+      State = bindReturnValue(Call, LCtx, State);
+    Bldr.generateNode(PS, State, *I);
+  }
+}
+
+
+/// Returns a region representing the first element of a (possibly
+/// multi-dimensional) array.
+///
+/// On return, \p Ty will be set to the base type of the array.
+///
+/// If the type is not an array type at all, the original value is returned.
+static SVal makeZeroElementRegion(ProgramStateRef State, SVal LValue,
+                                  QualType &Ty) {
+  SValBuilder &SVB = State->getStateManager().getSValBuilder();
+  ASTContext &Ctx = SVB.getContext();
+
+  while (const ArrayType *AT = Ctx.getAsArrayType(Ty)) {
+    Ty = AT->getElementType();
+    LValue = State->getLValue(Ty, SVB.makeZeroArrayIndex(), LValue);
+  }
+
+  return LValue;
+}
+
+void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
+                                       ExplodedNode *Pred,
+                                       ExplodedNodeSet &destNodes) {
+  const LocationContext *LCtx = Pred->getLocationContext();
+  ProgramStateRef State = Pred->getState();
+
+  const MemRegion *Target = 0;
+
+  // FIXME: Handle arrays, which run the same constructor for every element.
+  // For now, we just run the first constructor (which should still invalidate
+  // the entire array).
+
+  switch (CE->getConstructionKind()) {
+  case CXXConstructExpr::CK_Complete: {
+    // See if we're constructing an existing region by looking at the next
+    // element in the CFG.
+    const CFGBlock *B = currBldrCtx->getBlock();
+    if (currStmtIdx + 1 < B->size()) {
+      CFGElement Next = (*B)[currStmtIdx+1];
+
+      // Is this a constructor for a local variable?
+      if (Optional<CFGStmt> StmtElem = Next.getAs<CFGStmt>()) {
+        if (const DeclStmt *DS = dyn_cast<DeclStmt>(StmtElem->getStmt())) {
+          if (const VarDecl *Var = dyn_cast<VarDecl>(DS->getSingleDecl())) {
+            if (Var->getInit()->IgnoreImplicit() == CE) {
+              SVal LValue = State->getLValue(Var, LCtx);
+              QualType Ty = Var->getType();
+              LValue = makeZeroElementRegion(State, LValue, Ty);
+              Target = LValue.getAsRegion();
+            }
+          }
+        }
+      }
+      
+      // Is this a constructor for a member?
+      if (Optional<CFGInitializer> InitElem = Next.getAs<CFGInitializer>()) {
+        const CXXCtorInitializer *Init = InitElem->getInitializer();
+        assert(Init->isAnyMemberInitializer());
+
+        const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
+        Loc ThisPtr = getSValBuilder().getCXXThis(CurCtor,
+                                                  LCtx->getCurrentStackFrame());
+        SVal ThisVal = State->getSVal(ThisPtr);
+
+        const ValueDecl *Field;
+        SVal FieldVal;
+        if (Init->isIndirectMemberInitializer()) {
+          Field = Init->getIndirectMember();
+          FieldVal = State->getLValue(Init->getIndirectMember(), ThisVal);
+        } else {
+          Field = Init->getMember();
+          FieldVal = State->getLValue(Init->getMember(), ThisVal);
+        }
+
+        QualType Ty = Field->getType();
+        FieldVal = makeZeroElementRegion(State, FieldVal, Ty);
+        Target = FieldVal.getAsRegion();
+      }
+
+      // FIXME: This will eventually need to handle new-expressions as well.
+    }
+
+    // If we couldn't find an existing region to construct into, assume we're
+    // constructing a temporary.
+    if (!Target) {
+      MemRegionManager &MRMgr = getSValBuilder().getRegionManager();
+      Target = MRMgr.getCXXTempObjectRegion(CE, LCtx);
+    }
+
+    break;
+  }
+  case CXXConstructExpr::CK_NonVirtualBase:
+  case CXXConstructExpr::CK_VirtualBase:
+  case CXXConstructExpr::CK_Delegating: {
+    const CXXMethodDecl *CurCtor = cast<CXXMethodDecl>(LCtx->getDecl());
+    Loc ThisPtr = getSValBuilder().getCXXThis(CurCtor,
+                                              LCtx->getCurrentStackFrame());
+    SVal ThisVal = State->getSVal(ThisPtr);
+
+    if (CE->getConstructionKind() == CXXConstructExpr::CK_Delegating) {
+      Target = ThisVal.getAsRegion();
+    } else {
+      // Cast to the base type.
+      bool IsVirtual =
+        (CE->getConstructionKind() == CXXConstructExpr::CK_VirtualBase);
+      SVal BaseVal = getStoreManager().evalDerivedToBase(ThisVal, CE->getType(),
+                                                         IsVirtual);
+      Target = BaseVal.getAsRegion();
+    }
+    break;
+  }
+  }
+
+  CallEventManager &CEMgr = getStateManager().getCallEventManager();
+  CallEventRef<CXXConstructorCall> Call =
+    CEMgr.getCXXConstructorCall(CE, Target, State, LCtx);
+
+  ExplodedNodeSet DstPreVisit;
+  getCheckerManager().runCheckersForPreStmt(DstPreVisit, Pred, CE, *this);
+  ExplodedNodeSet DstPreCall;
+  getCheckerManager().runCheckersForPreCall(DstPreCall, DstPreVisit,
+                                            *Call, *this);
+
+  ExplodedNodeSet DstEvaluated;
+  StmtNodeBuilder Bldr(DstPreCall, DstEvaluated, *currBldrCtx);
+
+  bool IsArray = isa<ElementRegion>(Target);
+  if (CE->getConstructor()->isTrivial() &&
+      CE->getConstructor()->isCopyOrMoveConstructor() &&
+      !IsArray) {
+    // FIXME: Handle other kinds of trivial constructors as well.
+    for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
+         I != E; ++I)
+      performTrivialCopy(Bldr, *I, *Call);
+
+  } else {
+    for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
+         I != E; ++I)
+      defaultEvalCall(Bldr, *I, *Call);
+  }
+
+  ExplodedNodeSet DstPostCall;
+  getCheckerManager().runCheckersForPostCall(DstPostCall, DstEvaluated,
+                                             *Call, *this);
+  getCheckerManager().runCheckersForPostStmt(destNodes, DstPostCall, CE, *this);
+}
+
+void ExprEngine::VisitCXXDestructor(QualType ObjectType,
+                                    const MemRegion *Dest,
+                                    const Stmt *S,
+                                    bool IsBaseDtor,
+                                    ExplodedNode *Pred, 
+                                    ExplodedNodeSet &Dst) {
+  const LocationContext *LCtx = Pred->getLocationContext();
+  ProgramStateRef State = Pred->getState();
+
+  // FIXME: We need to run the same destructor on every element of the array.
+  // This workaround will just run the first destructor (which will still
+  // invalidate the entire array).
+  SVal DestVal = loc::MemRegionVal(Dest);
+  DestVal = makeZeroElementRegion(State, DestVal, ObjectType);
+  Dest = DestVal.getAsRegion();
+
+  const CXXRecordDecl *RecordDecl = ObjectType->getAsCXXRecordDecl();
+  assert(RecordDecl && "Only CXXRecordDecls should have destructors");
+  const CXXDestructorDecl *DtorDecl = RecordDecl->getDestructor();
+
+  CallEventManager &CEMgr = getStateManager().getCallEventManager();
+  CallEventRef<CXXDestructorCall> Call =
+    CEMgr.getCXXDestructorCall(DtorDecl, S, Dest, IsBaseDtor, State, LCtx);
+
+  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+                                Call->getSourceRange().getBegin(),
+                                "Error evaluating destructor");
+
+  ExplodedNodeSet DstPreCall;
+  getCheckerManager().runCheckersForPreCall(DstPreCall, Pred,
+                                            *Call, *this);
+
+  ExplodedNodeSet DstInvalidated;
+  StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currBldrCtx);
+  for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end();
+       I != E; ++I)
+    defaultEvalCall(Bldr, *I, *Call);
+
+  ExplodedNodeSet DstPostCall;
+  getCheckerManager().runCheckersForPostCall(Dst, DstInvalidated,
+                                             *Call, *this);
+}
+
+void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
+                                   ExplodedNodeSet &Dst) {
+  // FIXME: Much of this should eventually migrate to CXXAllocatorCall.
+  // Also, we need to decide how allocators actually work -- they're not
+  // really part of the CXXNewExpr because they happen BEFORE the
+  // CXXConstructExpr subexpression. See PR12014 for some discussion.
+  
+  unsigned blockCount = currBldrCtx->blockCount();
+  const LocationContext *LCtx = Pred->getLocationContext();
+  DefinedOrUnknownSVal symVal = UnknownVal();
+  FunctionDecl *FD = CNE->getOperatorNew();
+
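+  // Treat a free (non-member), non-variadic 'operator new' with a single
+  // parameter, or with a second 'std::nothrow_t' parameter, as the standard
+  // global allocator.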
+  bool IsStandardGlobalOpNewFunction = false;
+  if (FD && !isa<CXXMethodDecl>(FD) && !FD->isVariadic()) {
+    if (FD->getNumParams() == 2) {
+      QualType T = FD->getParamDecl(1)->getType();
+      if (const IdentifierInfo *II = T.getBaseTypeIdentifier())
+        // NoThrow placement new behaves as a standard new.
+        IsStandardGlobalOpNewFunction = II->getName().equals("nothrow_t");
+    }
+    else
+      // Placement forms are considered non-standard.
+      IsStandardGlobalOpNewFunction = (FD->getNumParams() == 1);
+  }
+
+  // We assume all standard global 'operator new' functions allocate memory on
+  // the heap. We realize this is an approximation that might not correctly
+  // model a custom global allocator.
+  if (IsStandardGlobalOpNewFunction)
+    symVal = svalBuilder.getConjuredHeapSymbolVal(CNE, LCtx, blockCount);
+  else
+    symVal = svalBuilder.conjureSymbolVal(0, CNE, LCtx, CNE->getType(), 
+                                          blockCount);
+
+  ProgramStateRef State = Pred->getState();
+  CallEventManager &CEMgr = getStateManager().getCallEventManager();
+  CallEventRef<CXXAllocatorCall> Call =
+    CEMgr.getCXXAllocatorCall(CNE, State, LCtx);
+
+  // Invalidate placement args.
+  // FIXME: Once we figure out how we want allocators to work,
+  // we should be using the usual pre-/(default-)eval-/post-call checks here.
+  State = Call->invalidateRegions(blockCount);
+  if (!State)
+    return;
+
+  // If we're compiling with exceptions enabled, and this allocation function
+  // is not declared as non-throwing, failures /must/ be signalled by
+  // exceptions, and thus the return value will never be NULL.
+  // C++11 [basic.stc.dynamic.allocation]p3.
+  if (FD && getContext().getLangOpts().CXXExceptions) {
+    QualType Ty = FD->getType();
+    if (const FunctionProtoType *ProtoType = Ty->getAs<FunctionProtoType>())
+      if (!ProtoType->isNothrow(getContext()))
+        State = State->assume(symVal, true);
+  }
+
+  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+
+  if (CNE->isArray()) {
+    // FIXME: allocating an array requires simulating the constructors.
+    // For now, just return a symbolicated region.
+    const MemRegion *NewReg = symVal.castAs<loc::MemRegionVal>().getRegion();
+    QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
+    const ElementRegion *EleReg =
+      getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
+    State = State->BindExpr(CNE, Pred->getLocationContext(),
+                            loc::MemRegionVal(EleReg));
+    Bldr.generateNode(CNE, Pred, State);
+    return;
+  }
+
+  // FIXME: Once we have proper support for CXXConstructExprs inside
+  // CXXNewExpr, we need to make sure that the constructed object is not
+  // immediately invalidated here. (The placement call should happen before
+  // the constructor call anyway.)
+  SVal Result = symVal;
+  if (FD && FD->isReservedGlobalPlacementOperator()) {
+    // Non-array placement new should always return the placement location.
+    SVal PlacementLoc = State->getSVal(CNE->getPlacementArg(0), LCtx);
+    Result = svalBuilder.evalCast(PlacementLoc, CNE->getType(),
+                                  CNE->getPlacementArg(0)->getType());
+  }
+
+  // Bind the address of the object, then check to see if we cached out.
+  State = State->BindExpr(CNE, LCtx, Result);
+  ExplodedNode *NewN = Bldr.generateNode(CNE, Pred, State);
+  if (!NewN)
+    return;
+
+  // If the type is not a record, we won't have a CXXConstructExpr as an
+  // initializer. Copy the value over.
+  if (const Expr *Init = CNE->getInitializer()) {
+    if (!isa<CXXConstructExpr>(Init)) {
+      assert(Bldr.getResults().size() == 1);
+      Bldr.takeNodes(NewN);
+
+      assert(!CNE->getType()->getPointeeCXXRecordDecl());
+      evalBind(Dst, CNE, NewN, Result, State->getSVal(Init, LCtx),
+               /*FirstInit=*/IsStandardGlobalOpNewFunction);
+    }
+  }
+}
+
+void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE, 
+                                    ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+  ProgramStateRef state = Pred->getState();
+  Bldr.generateNode(CDE, Pred, state);
+}
+
+void ExprEngine::VisitCXXCatchStmt(const CXXCatchStmt *CS,
+                                   ExplodedNode *Pred,
+                                   ExplodedNodeSet &Dst) {
+  const VarDecl *VD = CS->getExceptionDecl();
+  if (!VD) {
+    Dst.Add(Pred);
+    return;
+  }
+
+  const LocationContext *LCtx = Pred->getLocationContext();
+  SVal V = svalBuilder.conjureSymbolVal(CS, LCtx, VD->getType(),
+                                        currBldrCtx->blockCount());
+  ProgramStateRef state = Pred->getState();
+  state = state->bindLoc(state->getLValue(VD, LCtx), V);
+
+  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+  Bldr.generateNode(CS, Pred, state);
+}
+
+void ExprEngine::VisitCXXThisExpr(const CXXThisExpr *TE, ExplodedNode *Pred,
+                                    ExplodedNodeSet &Dst) {
+  StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
+
+  // Get the this object region from StoreManager.
+  const LocationContext *LCtx = Pred->getLocationContext();
+  const MemRegion *R =
+    svalBuilder.getRegionManager().getCXXThisRegion(
+                                  getContext().getCanonicalType(TE->getType()),
+                                                    LCtx);
+
+  ProgramStateRef state = Pred->getState();
+  SVal V = state->getSVal(loc::MemRegionVal(R));
+  Bldr.generateNode(TE, Pred, state->BindExpr(TE, LCtx, V));
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
new file mode 100644
index 0000000..06570a4
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -0,0 +1,970 @@
+//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines ExprEngine's support for calls and returns.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "ExprEngine"
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/SaveAndRestore.h"
+
+using namespace clang;
+using namespace ento;
+
+STATISTIC(NumOfDynamicDispatchPathSplits,
+  "The # of times we split the path due to imprecise dynamic dispatch info");
+
+STATISTIC(NumInlinedCalls,
+  "The # of times we inlined a call");
+
+STATISTIC(NumReachedInlineCountMax,
+  "The # of times we reached inline count maximum");
+
+void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
+  // Get the entry block in the CFG of the callee.
+  const StackFrameContext *calleeCtx = CE.getCalleeContext();
+  const CFG *CalleeCFG = calleeCtx->getCFG();
+  const CFGBlock *Entry = &(CalleeCFG->getEntry());
+  
+  // Validate the CFG.
+  assert(Entry->empty());
+  assert(Entry->succ_size() == 1);
+  
+  // Get the solitary successor.
+  const CFGBlock *Succ = *(Entry->succ_begin());
+  
+  // Construct an edge representing the starting location in the callee.
+  BlockEdge Loc(Entry, Succ, calleeCtx);
+
+  ProgramStateRef state = Pred->getState();
+  
+  // Construct a new node and add it to the worklist.
+  bool isNew;
+  ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
+  Node->addPredecessor(Pred, G);
+  if (isNew)
+    Engine.getWorkList()->enqueue(Node);
+}
+
+// Find the last statement on the path to the exploded node and the
+// corresponding Block.
+static std::pair<const Stmt*,
+                 const CFGBlock*> getLastStmt(const ExplodedNode *Node) {
+  const Stmt *S = 0;
+  const CFGBlock *Blk = 0;
+  const StackFrameContext *SF =
+          Node->getLocation().getLocationContext()->getCurrentStackFrame();
+
+  // Back up through the ExplodedGraph until we reach a statement node in this
+  // stack frame.
+  while (Node) {
+    const ProgramPoint &PP = Node->getLocation();
+
+    if (PP.getLocationContext()->getCurrentStackFrame() == SF) {
+      if (Optional<StmtPoint> SP = PP.getAs<StmtPoint>()) {
+        S = SP->getStmt();
+        break;
+      } else if (Optional<CallExitEnd> CEE = PP.getAs<CallExitEnd>()) {
+        S = CEE->getCalleeContext()->getCallSite();
+        if (S)
+          break;
+
+        // If there is no statement, this is an implicitly-generated call.
+        // We'll walk backwards over it and then continue the loop to find
+        // an actual statement.
+        Optional<CallEnter> CE;
+        do {
+          Node = Node->getFirstPred();
+          CE = Node->getLocationAs<CallEnter>();
+        } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext());
+
+        // Continue searching the graph.
+      } else if (Optional<BlockEdge> BE = PP.getAs<BlockEdge>()) {
+        Blk = BE->getSrc();
+      }
+    } else if (Optional<CallEnter> CE = PP.getAs<CallEnter>()) {
+      // If we reached the CallEnter for this function, it has no statements.
+      if (CE->getCalleeContext() == SF)
+        break;
+    }
+
+    if (Node->pred_empty())
+      return std::pair<const Stmt*, const CFGBlock*>((Stmt*)0, (CFGBlock*)0);
+
+    Node = *Node->pred_begin();
+  }
+
+  return std::pair<const Stmt*, const CFGBlock*>(S, Blk);
+}
+
+/// Adjusts a return value when the called function's return type does not
+/// match the caller's expression type. This can happen when a dynamic call
+/// is devirtualized, and the overriding method has a covariant (more specific)
+/// return type than the parent's method. For C++ objects, this means we need
+/// to add base casts.
+static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
+                              StoreManager &StoreMgr) {
+  // For now, the only adjustments we handle apply only to locations.
+  if (!V.getAs<Loc>())
+    return V;
+
+  // If the types already match, don't do any unnecessary work.
+  ExpectedTy = ExpectedTy.getCanonicalType();
+  ActualTy = ActualTy.getCanonicalType();
+  if (ExpectedTy == ActualTy)
+    return V;
+
+  // No adjustment is needed between Objective-C pointer types.
+  if (ExpectedTy->isObjCObjectPointerType() &&
+      ActualTy->isObjCObjectPointerType())
+    return V;
+
+  // C++ object pointers may need "derived-to-base" casts.
+  const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl();
+  const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl();
+  if (ExpectedClass && ActualClass) {
+    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+                       /*DetectVirtual=*/false);
+    if (ActualClass->isDerivedFrom(ExpectedClass, Paths) &&
+        !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) {
+      return StoreMgr.evalDerivedToBase(V, Paths.front());
+    }
+  }
+
+  // Unfortunately, Objective-C does not enforce that overridden methods have
+  // covariant return types, so we can't assert that that never happens.
+  // Be safe and return UnknownVal().
+  return UnknownVal();
+}
+
+void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC,
+                                           ExplodedNode *Pred,
+                                           ExplodedNodeSet &Dst) {
+  // Find the last statement in the function and the corresponding basic block.
+  const Stmt *LastSt = 0;
+  const CFGBlock *Blk = 0;
+  llvm::tie(LastSt, Blk) = getLastStmt(Pred);
+  if (!Blk || !LastSt) {
+    Dst.Add(Pred);
+    return;
+  }
+
+  // Here, we destroy the current location context. We use the current
+  // function's entire body as a diagnostic statement, with which the program
+  // point will be associated. However, we only want to use LastStmt as a
+  // reference for what to clean up if it's a ReturnStmt; otherwise, everything
+  // is dead.
+  SaveAndRestore<const NodeBuilderContext *> NodeContextRAII(currBldrCtx, &BC);
+  const LocationContext *LCtx = Pred->getLocationContext();
+  removeDead(Pred, Dst, dyn_cast<ReturnStmt>(LastSt), LCtx,
+             LCtx->getAnalysisDeclContext()->getBody(),
+             ProgramPoint::PostStmtPurgeDeadSymbolsKind);
+}
+
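+/// Returns true if the decl that was actually inlined (the runtime
+/// definition) differs from the decl seen at the call site, e.g. after a
+/// dynamic call has been devirtualized.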
+static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call,
+    const StackFrameContext *calleeCtx) {
+  const Decl *RuntimeCallee = calleeCtx->getDecl();
+  const Decl *StaticDecl = Call->getDecl();
+  assert(RuntimeCallee);
+  if (!StaticDecl)
+    return true;
+  return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl();
+}
+
+/// Returns true if the CXXConstructExpr \p E was intended to construct a
+/// prvalue for the region in \p V.
+///
+/// Note that we can't just test for rvalue vs. glvalue because
+/// CXXConstructExprs embedded in DeclStmts and initializers are considered
+/// rvalues by the AST, and the analyzer would like to treat them as lvalues.
+static bool isTemporaryPRValue(const CXXConstructExpr *E, SVal V) {
+  if (E->isGLValue())
+    return false;
+
+  const MemRegion *MR = V.getAsRegion();
+  if (!MR)
+    return false;
+
+  return isa<CXXTempObjectRegion>(MR);
+}
+
+/// The call exit is simulated with a sequence of nodes, which occur between 
+/// CallExitBegin and CallExitEnd. The following operations occur between the 
+/// two program points:
+/// 1. CallExitBegin (triggers the start of call exit sequence)
+/// 2. Bind the return value
+/// 3. Run remove-dead-bindings to clean up the dead symbols from the callee.
+/// 4. CallExitEnd (switch to the caller context)
+/// 5. PostStmt<CallExpr>
+void ExprEngine::processCallExit(ExplodedNode *CEBNode) {
+  // Step 1 CEBNode was generated before the call.
+
+  const StackFrameContext *calleeCtx =
+      CEBNode->getLocationContext()->getCurrentStackFrame();
+  
+  // The parent context might not be a stack frame, so make sure we
+  // look up the first enclosing stack frame.
+  const StackFrameContext *callerCtx =
+    calleeCtx->getParent()->getCurrentStackFrame();
+  
+  const Stmt *CE = calleeCtx->getCallSite();
+  ProgramStateRef state = CEBNode->getState();
+  // Find the last statement in the function and the corresponding basic block.
+  const Stmt *LastSt = 0;
+  const CFGBlock *Blk = 0;
+  llvm::tie(LastSt, Blk) = getLastStmt(CEBNode);
+
+  // Generate a CallEvent /before/ cleaning the state, so that we can get the
+  // correct value for 'this' (if necessary).
+  CallEventManager &CEMgr = getStateManager().getCallEventManager();
+  CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state);
+
+  // Step 2: generate node with bound return value: CEBNode -> BindedRetNode.
+
+  // If the callee returns an expression, bind its value to CallExpr.
+  if (CE) {
+    if (const ReturnStmt *RS = dyn_cast_or_null<ReturnStmt>(LastSt)) {
+      const LocationContext *LCtx = CEBNode->getLocationContext();
+      SVal V = state->getSVal(RS, LCtx);
+
+      // Ensure that the return type matches the type of the returned Expr.
+      if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) {
+        QualType ReturnedTy =
+          CallEvent::getDeclaredResultType(calleeCtx->getDecl());
+        if (!ReturnedTy.isNull()) {
+          if (const Expr *Ex = dyn_cast<Expr>(CE)) {
+            V = adjustReturnValue(V, Ex->getType(), ReturnedTy,
+                                  getStoreManager());
+          }
+        }
+      }
+
+      state = state->BindExpr(CE, callerCtx, V);
+    }
+
+    // Bind the constructed object value to CXXConstructExpr.
+    if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
+      loc::MemRegionVal This =
+        svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx);
+      SVal ThisV = state->getSVal(This);
+
+      // If the constructed object is a temporary prvalue, get its bindings.
+      if (isTemporaryPRValue(CCE, ThisV))
+        ThisV = state->getSVal(ThisV.castAs<Loc>());
+
+      state = state->BindExpr(CCE, callerCtx, ThisV);
+    }
+  }
+
+  // Step 3: BindedRetNode -> CleanedNodes
+  // If we can find a statement and a block in the inlined function, run remove
+  // dead bindings before returning from the call. This is important to ensure
+  // that we report issues such as leaks in the stack contexts in which
+  // they occurred.
+  ExplodedNodeSet CleanedNodes;
+  if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) {
+    static SimpleProgramPointTag retValBind("ExprEngine : Bind Return Value");
+    PostStmt Loc(LastSt, calleeCtx, &retValBind);
+    bool isNew;
+    ExplodedNode *BindedRetNode = G.getNode(Loc, state, false, &isNew);
+    BindedRetNode->addPredecessor(CEBNode, G);
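+    // If this node already existed in the graph, we have cached out: this
+    // return value binding has already been processed.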
+    if (!isNew)
+      return;
+
+    NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode);
+    currBldrCtx = &Ctx;
+    // Here, we call the Symbol Reaper with a null statement and the callee's
+    // location context, telling it to clean up everything in the callee's
+    // context (and its children). We use the callee's function body as a
+    // diagnostic statement, with which the program point will be associated.
+    removeDead(BindedRetNode, CleanedNodes, 0, calleeCtx,
+               calleeCtx->getAnalysisDeclContext()->getBody(),
+               ProgramPoint::PostStmtPurgeDeadSymbolsKind);
+    currBldrCtx = 0;
+  } else {
+    CleanedNodes.Add(CEBNode);
+  }
+
+  for (ExplodedNodeSet::iterator I = CleanedNodes.begin(),
+                                 E = CleanedNodes.end(); I != E; ++I) {
+
+    // Step 4: Generate the CallExit and leave the callee's context.
+    // CleanedNodes -> CEENode
+    CallExitEnd Loc(calleeCtx, callerCtx);
+    bool isNew;
+    ProgramStateRef CEEState = (*I == CEBNode) ? state : (*I)->getState();
+    ExplodedNode *CEENode = G.getNode(Loc, CEEState, false, &isNew);
+    CEENode->addPredecessor(*I, G);
+    if (!isNew)
+      return;
+
+    // Step 5: Perform the post-condition check of the CallExpr and enqueue the
+    // result onto the work list.
+    // CEENode -> Dst -> WorkList
+    NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode);
+    SaveAndRestore<const NodeBuilderContext*> NBCSave(currBldrCtx,
+        &Ctx);
+    SaveAndRestore<unsigned> CBISave(currStmtIdx, calleeCtx->getIndex());
+
+    CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState);
+
+    ExplodedNodeSet DstPostCall;
+    getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode,
+                                               *UpdatedCall, *this,
+                                               /*WasInlined=*/true);
+
+    ExplodedNodeSet Dst;
+    if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(Call)) {
+      getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg,
+                                                        *this,
+                                                        /*WasInlined=*/true);
+    } else if (CE) {
+      getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE,
+                                                 *this, /*WasInlined=*/true);
+    } else {
+      Dst.insert(DstPostCall);
+    }
+
+    // Enqueue the next element in the block.
+    for (ExplodedNodeSet::iterator PSI = Dst.begin(), PSE = Dst.end();
+                                   PSI != PSE; ++PSI) {
+      Engine.getWorkList()->enqueue(*PSI, calleeCtx->getCallSiteBlock(),
+                                    calleeCtx->getIndex()+1);
+    }
+  }
+}
+
+void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx,
+                               bool &IsRecursive, unsigned &StackDepth) {
+  IsRecursive = false;
+  StackDepth = 0;
+
+  while (LCtx) {
+    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LCtx)) {
+      const Decl *DI = SFC->getDecl();
+
+      // Mark recursive (and mutually recursive) functions and always count
+      // them when measuring the stack depth.
+      if (DI == D) {
+        IsRecursive = true;
+        ++StackDepth;
+        LCtx = LCtx->getParent();
+        continue;
+      }
+
+      // Do not count small functions when determining the stack depth.
+      AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI);
+      const CFG *CalleeCFG = CalleeADC->getCFG();
+      if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize())
+        ++StackDepth;
+    }
+    LCtx = LCtx->getParent();
+  }
+
+}
+
+static bool IsInStdNamespace(const FunctionDecl *FD) {
+  const DeclContext *DC = FD->getEnclosingNamespaceContext();
+  const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+  if (!ND)
+    return false;
+  
+  while (const DeclContext *Parent = ND->getParent()) {
+    if (!isa<NamespaceDecl>(Parent))
+      break;
+    ND = cast<NamespaceDecl>(Parent);
+  }
+
+  return ND->getName() == "std";
+}
+
+// The GDM component containing the dynamic dispatch bifurcation info. When
+// the exact type of the receiver is not known, we want to explore both paths -
+// one on which we do inline it and one on which we don't. This is done to
+// ensure we do not drop coverage.
+// This is a map from the receiver region to a bool, specifying whether we
+// consider this region's information precise along the given path.
+namespace {
+  enum DynamicDispatchMode {
+    DynamicDispatchModeInlined = 1,
+    DynamicDispatchModeConservative
+  };
+}
+REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
+                                 CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *,
+                                                             unsigned))
+
+bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
+                            NodeBuilder &Bldr, ExplodedNode *Pred,
+                            ProgramStateRef State) {
+  assert(D);
+
+  const LocationContext *CurLC = Pred->getLocationContext();
+  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
+  const LocationContext *ParentOfCallee = CallerSFC;
+  if (Call.getKind() == CE_Block) {
+    const BlockDataRegion *BR = cast<BlockCall>(Call).getBlockRegion();
+    assert(BR && "If we have the block definition we should have its region");
+    AnalysisDeclContext *BlockCtx = AMgr.getAnalysisDeclContext(D);
+    ParentOfCallee = BlockCtx->getBlockInvocationContext(CallerSFC,
+                                                         cast<BlockDecl>(D),
+                                                         BR);
+  }
+  
+  // This may be NULL, but that's fine.
+  const Expr *CallE = Call.getOriginExpr();
+
+  // Construct a new stack frame for the callee.
+  AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D);
+  const StackFrameContext *CalleeSFC =
+    CalleeADC->getStackFrame(ParentOfCallee, CallE,
+                             currBldrCtx->getBlock(),
+                             currStmtIdx);
+  
+    
+  CallEnter Loc(CallE, CalleeSFC, CurLC);
+
+  // Construct a new state which contains the mapping from actual to
+  // formal arguments.
+  State = State->enterStackFrame(Call, CalleeSFC);
+
+  bool isNew;
+  if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
+    N->addPredecessor(Pred, G);
+    if (isNew)
+      Engine.getWorkList()->enqueue(N);
+  }
+
+  // If we decided to inline the call, the successor has been manually
+  // added onto the work list, so remove it from the node builder.
+  Bldr.takeNodes(Pred);
+
+  NumInlinedCalls++;
+
+  // Mark the decl as visited.
+  if (VisitedCallees)
+    VisitedCallees->insert(D);
+
+  return true;
+}
+
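+/// If this call is being replayed without inlining, strip the
+/// ReplayWithoutInlining marker from the state; returns null if no replay
+/// was requested.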
+static ProgramStateRef getInlineFailedState(ProgramStateRef State,
+                                            const Stmt *CallE) {
+  const void *ReplayState = State->get<ReplayWithoutInlining>();
+  if (!ReplayState)
+    return 0;
+
+  assert(ReplayState == CallE && "Backtracked to the wrong call.");
+  (void)CallE;
+
+  return State->remove<ReplayWithoutInlining>();
+}
+
+void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
+                               ExplodedNodeSet &dst) {
+  // Perform the previsit of the CallExpr.
+  ExplodedNodeSet dstPreVisit;
+  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);
+
+  // Get the call in its initial state. We use this as a template to perform
+  // all the checks.
+  CallEventManager &CEMgr = getStateManager().getCallEventManager();
+  CallEventRef<> CallTemplate
+    = CEMgr.getSimpleCall(CE, Pred->getState(), Pred->getLocationContext());
+
+  // Evaluate the function call.  We try each of the checkers
+  // to see if they can evaluate the function call.
+  ExplodedNodeSet dstCallEvaluated;
+  for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
+       I != E; ++I) {
+    evalCall(dstCallEvaluated, *I, *CallTemplate);
+  }
+
+  // Finally, perform the post-condition check of the CallExpr and store
+  // the created nodes in 'Dst'.
+  // Note that if the call was inlined, dstCallEvaluated will be empty.
+  // The post-CallExpr check will occur in processCallExit.
+  getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
+                                             *this);
+}
+
+void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
+                          const CallEvent &Call) {
+  // WARNING: At this time, the state attached to 'Call' may be older than the
+  // state in 'Pred'. This is a minor optimization since CheckerManager will
+  // use an updated CallEvent instance when calling checkers, but if 'Call' is
+  // ever used directly in this function all callers should be updated to pass
+  // the most recent state. (It is probably not worth doing the work here since
+  // for some callers this will not be necessary.)
+
+  // Run any pre-call checks using the generic call interface.
+  ExplodedNodeSet dstPreVisit;
+  getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, Call, *this);
+
+  // Actually evaluate the function call.  We try each of the checkers
+  // to see if they can evaluate the function call, and get a callback at
+  // defaultEvalCall if all of them fail.
+  ExplodedNodeSet dstCallEvaluated;
+  getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
+                                             Call, *this);
+
+  // Finally, run any post-call checks.
+  getCheckerManager().runCheckersForPostCall(Dst, dstCallEvaluated,
+                                             Call, *this);
+}
+
+ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
+                                            const LocationContext *LCtx,
+                                            ProgramStateRef State) {
+  const Expr *E = Call.getOriginExpr();
+  if (!E)
+    return State;
+
+  // Some method families have known return values.
+  if (const ObjCMethodCall *Msg = dyn_cast<ObjCMethodCall>(&Call)) {
+    switch (Msg->getMethodFamily()) {
+    default:
+      break;
+    case OMF_autorelease:
+    case OMF_retain:
+    case OMF_self: {
+      // These methods return their receivers.
+      return State->BindExpr(E, LCtx, Msg->getReceiverSVal());
+    }
+    }
+  } else if (const CXXConstructorCall *C = dyn_cast<CXXConstructorCall>(&Call)){
+    SVal ThisV = C->getCXXThisVal();
+
+    // If the constructed object is a temporary prvalue, get its bindings.
+    if (isTemporaryPRValue(cast<CXXConstructExpr>(E), ThisV))
+      ThisV = State->getSVal(ThisV.castAs<Loc>());
+
+    return State->BindExpr(E, LCtx, ThisV);
+  }
+
+  // Conjure a symbol if the return value is unknown.
+  QualType ResultTy = Call.getResultType();
+  SValBuilder &SVB = getSValBuilder();
+  unsigned Count = currBldrCtx->blockCount();
+  SVal R = SVB.conjureSymbolVal(0, E, LCtx, ResultTy, Count);
+  return State->BindExpr(E, LCtx, R);
+}
+
+// Conservatively evaluate call by invalidating regions and binding
+// a conjured return value.
+void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr,
+                                      ExplodedNode *Pred,
+                                      ProgramStateRef State) {
+  State = Call.invalidateRegions(currBldrCtx->blockCount(), State);
+  State = bindReturnValue(Call, Pred->getLocationContext(), State);
+
+  // And make the result node.
+  Bldr.generateNode(Call.getProgramPoint(), State, Pred);
+}
+
+enum CallInlinePolicy {
+  CIP_Allowed,
+  CIP_DisallowedOnce,
+  CIP_DisallowedAlways
+};
+
+static CallInlinePolicy mayInlineCallKind(const CallEvent &Call,
+                                          const ExplodedNode *Pred,
+                                          AnalyzerOptions &Opts) {
+  const LocationContext *CurLC = Pred->getLocationContext();
+  const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame();
+  switch (Call.getKind()) {
+  case CE_Function:
+  case CE_Block:
+    break;
+  case CE_CXXMember:
+  case CE_CXXMemberOperator:
+    if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions))
+      return CIP_DisallowedAlways;
+    break;
+  case CE_CXXConstructor: {
+    if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors))
+      return CIP_DisallowedAlways;
+
+    const CXXConstructorCall &Ctor = cast<CXXConstructorCall>(Call);
+
+    // FIXME: We don't handle constructors or destructors for arrays properly.
+    // Even once we do, we still need to be careful about implicitly-generated
+    // initializers for array fields in default move/copy constructors.
+    const MemRegion *Target = Ctor.getCXXThisVal().getAsRegion();
+    if (Target && isa<ElementRegion>(Target))
+      return CIP_DisallowedOnce;
+
+    // FIXME: This is a hack. We don't use the correct region for a new
+    // expression, so if we inline the constructor its result will just be
+    // thrown away. This short-term hack is tracked in <rdar://problem/12180598>
+    // and the longer-term possible fix is discussed in PR12014.
+    const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr();
+    if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr))
+      if (isa<CXXNewExpr>(Parent))
+        return CIP_DisallowedOnce;
+
+    // Inlining constructors requires including initializers in the CFG.
+    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
+    assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers");
+    (void)ADC;
+
+    // If the destructor is trivial, it's always safe to inline the constructor.
+    if (Ctor.getDecl()->getParent()->hasTrivialDestructor())
+      break;
+
+    // For other types, only inline constructors if destructor inlining is
+    // also enabled.
+    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
+      return CIP_DisallowedAlways;
+
+    // FIXME: This is a hack. We don't handle temporary destructors
+    // right now, so we shouldn't inline their constructors.
+    if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete)
+      if (!Target || !isa<DeclRegion>(Target))
+        return CIP_DisallowedOnce;
+
+    break;
+  }
+  case CE_CXXDestructor: {
+    if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors))
+      return CIP_DisallowedAlways;
+
+    // Inlining destructors requires building the CFG correctly.
+    const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext();
+    assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors");
+    (void)ADC;
+
+    const CXXDestructorCall &Dtor = cast<CXXDestructorCall>(Call);
+
+    // FIXME: We don't handle constructors or destructors for arrays properly.
+    const MemRegion *Target = Dtor.getCXXThisVal().getAsRegion();
+    if (Target && isa<ElementRegion>(Target))
+      return CIP_DisallowedOnce;
+
+    break;
+  }
+  case CE_CXXAllocator:
+    // Do not inline allocators until we model deallocators.
+    // This is unfortunate, but basically necessary for smart pointers and such.
+    return CIP_DisallowedAlways;
+  case CE_ObjCMessage:
+    if (!Opts.mayInlineObjCMethod())
+      return CIP_DisallowedAlways;
+    if (!(Opts.getIPAMode() == IPAK_DynamicDispatch ||
+          Opts.getIPAMode() == IPAK_DynamicDispatchBifurcate))
+      return CIP_DisallowedAlways;
+    break;
+  }
+
+  return CIP_Allowed;
+}
+
+/// Returns true if the given C++ class contains a member with the given name.
+static bool hasMember(const ASTContext &Ctx, const CXXRecordDecl *RD,
+                      StringRef Name) {
+  const IdentifierInfo &II = Ctx.Idents.get(Name);
+  DeclarationName DeclName = Ctx.DeclarationNames.getIdentifier(&II);
+  if (!RD->lookup(DeclName).empty())
+    return true;
+
+  CXXBasePaths Paths(false, false, false);
+  if (RD->lookupInBases(&CXXRecordDecl::FindOrdinaryMember,
+                        DeclName.getAsOpaquePtr(),
+                        Paths))
+    return true;
+
+  return false;
+}
+
+/// Returns true if the given C++ class is a container or iterator.
+///
+/// Our heuristic for this is whether it contains a method named 'begin()' or a
+/// nested type named 'iterator' or 'iterator_category'.
+static bool isContainerClass(const ASTContext &Ctx, const CXXRecordDecl *RD) {
+  return hasMember(Ctx, RD, "begin") ||
+         hasMember(Ctx, RD, "iterator") ||
+         hasMember(Ctx, RD, "iterator_category");
+}
+
+/// Returns true if the given function refers to a constructor or destructor of
+/// a C++ container or iterator.
+///
+/// We generally do a poor job modeling most containers right now, and would
+/// prefer not to inline their setup and teardown.
+static bool isContainerCtorOrDtor(const ASTContext &Ctx,
+                                  const FunctionDecl *FD) {
+  if (!(isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD)))
+    return false;
+
+  const CXXRecordDecl *RD = cast<CXXMethodDecl>(FD)->getParent();
+  return isContainerClass(Ctx, RD);
+}
+
+/// Returns true if the function in \p CalleeADC may be inlined in general.
+///
+/// This checks static properties of the function, such as its signature and
+/// CFG, to determine whether the analyzer should ever consider inlining it,
+/// in any context.
+static bool mayInlineDecl(const CallEvent &Call, AnalysisDeclContext *CalleeADC,
+                          AnalyzerOptions &Opts) {
+  // FIXME: Do not inline variadic calls.
+  if (Call.isVariadic())
+    return false;
+
+  // Check certain C++-related inlining policies.
+  ASTContext &Ctx = CalleeADC->getASTContext();
+  if (Ctx.getLangOpts().CPlusPlus) {
+    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CalleeADC->getDecl())) {
+      // Conditionally control the inlining of template functions.
+      if (!Opts.mayInlineTemplateFunctions())
+        if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
+          return false;
+
+      // Conditionally control the inlining of C++ standard library functions.
+      if (!Opts.mayInlineCXXStandardLibrary())
+        if (Ctx.getSourceManager().isInSystemHeader(FD->getLocation()))
+          if (IsInStdNamespace(FD))
+            return false;
+
+      // Conditionally control the inlining of methods on objects that look
+      // like C++ containers.
+      if (!Opts.mayInlineCXXContainerCtorsAndDtors())
+        if (!Ctx.getSourceManager().isFromMainFile(FD->getLocation()))
+          if (isContainerCtorOrDtor(Ctx, FD))
+            return false;
+    }
+  }
+
+  // It is possible that the CFG cannot be constructed.
+  // Be safe, and check if the CalleeCFG is valid.
+  const CFG *CalleeCFG = CalleeADC->getCFG();
+  if (!CalleeCFG)
+    return false;
+
+  // Do not inline large functions.
+  if (CalleeCFG->getNumBlockIDs() > Opts.getMaxInlinableSize())
+    return false;
+
+  // It is possible that the live variables analysis cannot be
+  // run.  If so, bail out.
+  if (!CalleeADC->getAnalysis<RelaxedLiveVariables>())
+    return false;
+
+  return true;
+}
+
+bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
+                                  const ExplodedNode *Pred) {
+  if (!D)
+    return false;
+
+  AnalysisManager &AMgr = getAnalysisManager();
+  AnalyzerOptions &Opts = AMgr.options;
+  AnalysisDeclContextManager &ADCMgr = AMgr.getAnalysisDeclContextManager();
+  AnalysisDeclContext *CalleeADC = ADCMgr.getContext(D);
+
+  // The auto-synthesized bodies are essential to inline as they are
+  // usually small and commonly used. Note: we should do this check early on to
+  // ensure we always inline these calls.
+  if (CalleeADC->isBodyAutosynthesized())
+    return true;
+
+  if (!AMgr.shouldInlineCall())
+    return false;
+
+  // Check if this function has been marked as non-inlinable.
+  Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
+  if (MayInline.hasValue()) {
+    if (!MayInline.getValue())
+      return false;
+
+  } else {
+    // We haven't actually checked the static properties of this function yet.
+    // Do that now, and record our decision in the function summaries.
+    if (mayInlineDecl(Call, CalleeADC, Opts)) {
+      Engine.FunctionSummaries->markMayInline(D);
+    } else {
+      Engine.FunctionSummaries->markShouldNotInline(D);
+      return false;
+    }
+  }
+
+  // Check if we should inline a call based on its kind.
+  // FIXME: this checks both static and dynamic properties of the call, which
+  // means we're redoing a bit of work that could be cached in the function
+  // summary.
+  CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts);
+  if (CIP != CIP_Allowed) {
+    if (CIP == CIP_DisallowedAlways) {
+      assert(!MayInline.hasValue() || MayInline.getValue());
+      Engine.FunctionSummaries->markShouldNotInline(D);
+    }
+    return false;
+  }
+
+  const CFG *CalleeCFG = CalleeADC->getCFG();
+
+  // Do not inline if recursive or we've reached max stack frame count.
+  bool IsRecursive = false;
+  unsigned StackDepth = 0;
+  examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth);
+  if ((StackDepth >= Opts.InlineMaxStackDepth) &&
+      ((CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize())
+       || IsRecursive))
+    return false;
+
+  // Do not inline large functions too many times.
+  if ((Engine.FunctionSummaries->getNumTimesInlined(D) >
+       Opts.getMaxTimesInlineLarge()) &&
+      CalleeCFG->getNumBlockIDs() > 13) {
+    NumReachedInlineCountMax++;
+    return false;
+  }
+
+  if (HowToInline == Inline_Minimal &&
+      (CalleeCFG->getNumBlockIDs() > Opts.getAlwaysInlineSize()
+      || IsRecursive))
+    return false;
+
+  Engine.FunctionSummaries->bumpNumTimesInlined(D);
+
+  return true;
+}
+
+static bool isTrivialObjectAssignment(const CallEvent &Call) {
+  const CXXInstanceCall *ICall = dyn_cast<CXXInstanceCall>(&Call);
+  if (!ICall)
+    return false;
+
+  const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(ICall->getDecl());
+  if (!MD)
+    return false;
+  if (!(MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()))
+    return false;
+
+  return MD->isTrivial();
+}
+
+void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
+                                 const CallEvent &CallTemplate) {
+  // Make sure we have the most recent state attached to the call.
+  ProgramStateRef State = Pred->getState();
+  CallEventRef<> Call = CallTemplate.cloneWithState(State);
+
+  // Special-case trivial assignment operators.
+  if (isTrivialObjectAssignment(*Call)) {
+    performTrivialCopy(Bldr, Pred, *Call);
+    return;
+  }
+
+  // Try to inline the call.
+  // The origin expression here is just used as a kind of checksum;
+  // this should still be safe even for CallEvents that don't come from exprs.
+  const Expr *E = Call->getOriginExpr();
+
+  ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
+  if (InlinedFailedState) {
+    // If we already tried once and failed, make sure we don't retry later.
+    State = InlinedFailedState;
+  } else {
+    RuntimeDefinition RD = Call->getRuntimeDefinition();
+    const Decl *D = RD.getDecl();
+    if (shouldInlineCall(*Call, D, Pred)) {
+      if (RD.mayHaveOtherDefinitions()) {
+        AnalyzerOptions &Options = getAnalysisManager().options;
+
+        // Explore with and without inlining the call.
+        if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
+          BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
+          return;
+        }
+
+        // Don't inline if we're not in any dynamic dispatch mode.
+        if (Options.getIPAMode() != IPAK_DynamicDispatch) {
+          conservativeEvalCall(*Call, Bldr, Pred, State);
+          return;
+        }
+      }
+
+      // We are not bifurcating and we do have a Decl, so just inline.
+      if (inlineCall(*Call, D, Bldr, Pred, State))
+        return;
+    }
+  }
+
+  // If we can't inline it, handle the return value and invalidate the regions.
+  conservativeEvalCall(*Call, Bldr, Pred, State);
+}
+
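+// Split the analysis path for a dynamically dispatched call: one successor
+// assumes the given definition 'D' is the one that runs and inlines it, the
+// other evaluates the call conservatively.  The choice is recorded per
+// receiver region in DynamicDispatchBifurcationMap so that later calls
+// through the same region stay on the same path.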
+void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
+                               const CallEvent &Call, const Decl *D,
+                               NodeBuilder &Bldr, ExplodedNode *Pred) {
+  assert(BifurReg);
+  BifurReg = BifurReg->StripCasts();
+
+  // Check if we've performed the split already - note, we only want
+  // to split the path once per memory region.
+  ProgramStateRef State = Pred->getState();
+  const unsigned *BState =
+                        State->get<DynamicDispatchBifurcationMap>(BifurReg);
+  if (BState) {
+    // If we are on "inline path", keep inlining if possible.
+    if (*BState == DynamicDispatchModeInlined)
+      if (inlineCall(Call, D, Bldr, Pred, State))
+        return;
+    // If inline failed, or we are on the path where we assume we
+    // don't have enough info about the receiver to inline, conjure the
+    // return value and invalidate the regions.
+    conservativeEvalCall(Call, Bldr, Pred, State);
+    return;
+  }
+
+  // If we got here, this is the first time we process a message to this
+  // region, so split the path.
+  ProgramStateRef IState =
+      State->set<DynamicDispatchBifurcationMap>(BifurReg,
+                                               DynamicDispatchModeInlined);
+  inlineCall(Call, D, Bldr, Pred, IState);
+
+  ProgramStateRef NoIState =
+      State->set<DynamicDispatchBifurcationMap>(BifurReg,
+                                               DynamicDispatchModeConservative);
+  conservativeEvalCall(Call, Bldr, Pred, NoIState);
+
+  NumOfDynamicDispatchPathSplits++;
+  return;
+}
+
+
+void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
+                                 ExplodedNodeSet &Dst) {
+  
+  ExplodedNodeSet dstPreVisit;
+  getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);
+
+  StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx);
+  
+  if (RS->getRetValue()) {
+    for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
+                                  ei = dstPreVisit.end(); it != ei; ++it) {
+      B.generateNode(RS, *it, (*it)->getState());
+    }
+  }
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
new file mode 100644
index 0000000..d276d92
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -0,0 +1,215 @@
+//=-- ExprEngineObjC.cpp - ExprEngine support for Objective-C ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines ExprEngine's support for Objective-C expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtObjC.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+
+void ExprEngine::VisitLvalObjCIvarRefExpr(const ObjCIvarRefExpr *Ex, 
+                                          ExplodedNode *Pred,
+                                          ExplodedNodeSet &Dst) {
+  ProgramStateRef state = Pred->getState();
+  const LocationContext *LCtx = Pred->getLocationContext();
+  SVal baseVal = state->getSVal(Ex->getBase(), LCtx);
+  SVal location = state->getLValue(Ex->getDecl(), baseVal);
+  
+  ExplodedNodeSet dstIvar;
+  StmtNodeBuilder Bldr(Pred, dstIvar, *currBldrCtx);
+  Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, location));
+  
+  // Perform the post-condition check of the ObjCIvarRefExpr and store
+  // the created nodes in 'Dst'.
+  getCheckerManager().runCheckersForPostStmt(Dst, dstIvar, Ex, *this);
+}
+
+void ExprEngine::VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S,
+                                             ExplodedNode *Pred,
+                                             ExplodedNodeSet &Dst) {
+  getCheckerManager().runCheckersForPreStmt(Dst, Pred, S, *this);
+}
+
+void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
+                                            ExplodedNode *Pred,
+                                            ExplodedNodeSet &Dst) {
+  
+  // ObjCForCollectionStmts are processed in two places.  This method
+  // handles the case where an ObjCForCollectionStmt* occurs as one of the
+  // statements within a basic block.  This transfer function does two things:
+  //
+  //  (1) binds the next container value to 'element'.  This creates a new
+  //      node in the ExplodedGraph.
+  //
+  //  (2) binds the value 0/1 to the ObjCForCollectionStmt* itself, indicating
+  //      whether or not the container has any more elements.  This value
+  //      will be tested in ProcessBranch.  We need to explicitly bind
+  //      this value because a container can contain nil elements.
+  //
+  // FIXME: Eventually this logic should actually do dispatches to
+  //   'countByEnumeratingWithState:objects:count:' (NSFastEnumeration).
+  //   This will require simulating a temporary NSFastEnumerationState, either
+  //   through an SVal or through the use of MemRegions.  This value can
+  //   be affixed to the ObjCForCollectionStmt* instead of 0/1; when the loop
+  //   terminates we reclaim the temporary (it goes out of scope) and we
+  //   can test if the SVal is 0 or if the MemRegion is null (depending
+  //   on what approach we take).
+  //
+  //  For now: simulate (1) by binding 'element' either to a conjured symbol
+  //    or to nil (if the container is empty).  Thus this transfer function
+  //    will by default result in state splitting.
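+  //
+  //  Illustrative (hypothetical) source being modeled:
+  //
+  //    for (id elem in container) { use(elem); }
+  //
+  //  On the "has elements" branch 'elem' is bound to a conjured symbol and
+  //  the statement evaluates to 1; on the "no elements" branch 'elem' is
+  //  bound to nil and the statement evaluates to 0.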
+
+  const Stmt *elem = S->getElement();
+  ProgramStateRef state = Pred->getState();
+  SVal elementV;
+  
+  if (const DeclStmt *DS = dyn_cast<DeclStmt>(elem)) {
+    const VarDecl *elemD = cast<VarDecl>(DS->getSingleDecl());
+    assert(elemD->getInit() == 0);
+    elementV = state->getLValue(elemD, Pred->getLocationContext());
+  }
+  else {
+    elementV = state->getSVal(elem, Pred->getLocationContext());
+  }
+  
+  ExplodedNodeSet dstLocation;
+  evalLocation(dstLocation, S, elem, Pred, state, elementV, NULL, false);
+
+  ExplodedNodeSet Tmp;
+  StmtNodeBuilder Bldr(Pred, Tmp, *currBldrCtx);
+
+  for (ExplodedNodeSet::iterator NI = dstLocation.begin(),
+       NE = dstLocation.end(); NI!=NE; ++NI) {
+    Pred = *NI;
+    ProgramStateRef state = Pred->getState();
+    const LocationContext *LCtx = Pred->getLocationContext();
+    
+    // Handle the case where the container still has elements.
+    SVal TrueV = svalBuilder.makeTruthVal(1);
+    ProgramStateRef hasElems = state->BindExpr(S, LCtx, TrueV);
+    
+    // Handle the case where the container has no elements.
+    SVal FalseV = svalBuilder.makeTruthVal(0);
+    ProgramStateRef noElems = state->BindExpr(S, LCtx, FalseV);
+
+    if (Optional<loc::MemRegionVal> MV = elementV.getAs<loc::MemRegionVal>())
+      if (const TypedValueRegion *R = 
+          dyn_cast<TypedValueRegion>(MV->getRegion())) {
+        // FIXME: The proper thing to do is to really iterate over the
+        //  container.  We will do this with dispatch logic to the store.
+        //  For now, just 'conjure' up a symbolic value.
+        QualType T = R->getValueType();
+        assert(Loc::isLocType(T));
+        SymbolRef Sym = SymMgr.conjureSymbol(elem, LCtx, T,
+                                             currBldrCtx->blockCount());
+        SVal V = svalBuilder.makeLoc(Sym);
+        hasElems = hasElems->bindLoc(elementV, V);
+        
+        // Bind the location to 'nil' on the false branch.
+        SVal nilV = svalBuilder.makeIntVal(0, T);
+        noElems = noElems->bindLoc(elementV, nilV);
+      }
+    
+    // Create the new nodes.
+    Bldr.generateNode(S, Pred, hasElems);
+    Bldr.generateNode(S, Pred, noElems);
+  }
+
+  // Finally, run any custom checkers.
+  // FIXME: Eventually all pre- and post-checks should live in VisitStmt.
+  getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this);
+}
+
+void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME,
+                                  ExplodedNode *Pred,
+                                  ExplodedNodeSet &Dst) {
+  CallEventManager &CEMgr = getStateManager().getCallEventManager();
+  CallEventRef<ObjCMethodCall> Msg =
+    CEMgr.getObjCMethodCall(ME, Pred->getState(), Pred->getLocationContext());
+
+  // Handle the previsits checks.
+  ExplodedNodeSet dstPrevisit;
+  getCheckerManager().runCheckersForPreObjCMessage(dstPrevisit, Pred,
+                                                   *Msg, *this);
+  ExplodedNodeSet dstGenericPrevisit;
+  getCheckerManager().runCheckersForPreCall(dstGenericPrevisit, dstPrevisit,
+                                            *Msg, *this);
+
+  // Proceed with evaluating the message expression.
+  ExplodedNodeSet dstEval;
+  StmtNodeBuilder Bldr(dstGenericPrevisit, dstEval, *currBldrCtx);
+
+  for (ExplodedNodeSet::iterator DI = dstGenericPrevisit.begin(),
+       DE = dstGenericPrevisit.end(); DI != DE; ++DI) {
+    ExplodedNode *Pred = *DI;
+    ProgramStateRef State = Pred->getState();
+    CallEventRef<ObjCMethodCall> UpdatedMsg = Msg.cloneWithState(State);
+    
+    if (UpdatedMsg->isInstanceMessage()) {
+      SVal recVal = UpdatedMsg->getReceiverSVal();
+      if (!recVal.isUndef()) {
+        // Bifurcate the state into nil and non-nil ones.
+        DefinedOrUnknownSVal receiverVal =
+            recVal.castAs<DefinedOrUnknownSVal>();
+
+        ProgramStateRef notNilState, nilState;
+        llvm::tie(notNilState, nilState) = State->assume(receiverVal);
+        
+        // There are three cases: can be nil or non-nil, must be nil, must be
+        // non-nil. We ignore the must-be-nil case and merge the other two
+        // into the non-nil case.
+        // FIXME: This ignores many potential bugs (<rdar://problem/11733396>).
+        // Revisit once we have lazier constraints.
+        if (nilState && !notNilState) {
+          continue;
+        }
+        
+        // Check if the "raise" message was sent.
+        assert(notNilState);
+        if (ObjCNoRet.isImplicitNoReturn(ME)) {
+          // If we raise an exception, for now treat it as a sink.
+          // Eventually we will want to handle exceptions properly.
+          Bldr.generateSink(ME, Pred, State);
+          continue;
+        }
+        
+        // Generate a transition to non-Nil state.
+        if (notNilState != State) {
+          Pred = Bldr.generateNode(ME, Pred, notNilState);
+          assert(Pred && "Should have cached out already!");
+        }
+      }
+    } else {
+      // Check for special class methods that are known to not return
+      // and that we should treat as a sink.
+      if (ObjCNoRet.isImplicitNoReturn(ME)) {
+        // If we raise an exception, for now treat it as a sink.
+        // Eventually we will want to handle exceptions properly.
+        Bldr.generateSink(ME, Pred, Pred->getState());
+        continue;
+      }
+    }
+
+    defaultEvalCall(Bldr, Pred, *UpdatedMsg);
+  }
+  
+  ExplodedNodeSet dstPostvisit;
+  getCheckerManager().runCheckersForPostCall(dstPostvisit, dstEval,
+                                             *Msg, *this);
+
+  // Finally, perform the post-condition check of the ObjCMessageExpr and store
+  // the created nodes in 'Dst'.
+  getCheckerManager().runCheckersForPostObjCMessage(Dst, dstPostvisit,
+                                                    *Msg, *this);
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp
new file mode 100644
index 0000000..c21735b
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp
@@ -0,0 +1,32 @@
+//== FunctionSummary.cpp - Stores summaries of functions. ----------*- C++ -*-//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a summary of a function gathered/used by static analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h"
+using namespace clang;
+using namespace ento;
+
+unsigned FunctionSummariesTy::getTotalNumBasicBlocks() {
+  unsigned Total = 0;
+  for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
+    Total += I->second.TotalBasicBlocks;
+  }
+  return Total;
+}
+
+unsigned FunctionSummariesTy::getTotalNumVisitedBasicBlocks() {
+  unsigned Total = 0;
+  for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
+    Total += I->second.VisitedBasicBlocks.count();
+  }
+  return Total;
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
new file mode 100644
index 0000000..73426da
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -0,0 +1,566 @@
+//===--- HTMLDiagnostics.cpp - HTML Diagnostics for Paths ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the HTMLDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/Core/HTMLRewrite.h"
+#include "clang/Rewrite/Core/Rewriter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Boilerplate.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class HTMLDiagnostics : public PathDiagnosticConsumer {
+  llvm::sys::Path Directory, FilePrefix;
+  bool createdDir, noDir;
+  const Preprocessor &PP;
+public:
+  HTMLDiagnostics(const std::string& prefix, const Preprocessor &pp);
+
+  virtual ~HTMLDiagnostics() { FlushDiagnostics(NULL); }
+
+  virtual void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+                                    FilesMade *filesMade);
+
+  virtual StringRef getName() const {
+    return "HTMLDiagnostics";
+  }
+
+  unsigned ProcessMacroPiece(raw_ostream &os,
+                             const PathDiagnosticMacroPiece& P,
+                             unsigned num);
+
+  void HandlePiece(Rewriter& R, FileID BugFileID,
+                   const PathDiagnosticPiece& P, unsigned num, unsigned max);
+
+  void HighlightRange(Rewriter& R, FileID BugFileID, SourceRange Range,
+                      const char *HighlightStart = "<span class=\"mrange\">",
+                      const char *HighlightEnd = "</span>");
+
+  void ReportDiag(const PathDiagnostic& D,
+                  FilesMade *filesMade);
+};
+
+} // end anonymous namespace
+
+HTMLDiagnostics::HTMLDiagnostics(const std::string& prefix,
+                                 const Preprocessor &pp)
+  : Directory(prefix), FilePrefix(prefix), createdDir(false), noDir(false),
+    PP(pp) {
+  // All html files begin with "report"
+  FilePrefix.appendComponent("report");
+}
+
+void ento::createHTMLDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+                                        PathDiagnosticConsumers &C,
+                                        const std::string& prefix,
+                                        const Preprocessor &PP) {
+  C.push_back(new HTMLDiagnostics(prefix, PP));
+}
+
+//===----------------------------------------------------------------------===//
+// Report processing.
+//===----------------------------------------------------------------------===//
+
+void HTMLDiagnostics::FlushDiagnosticsImpl(
+  std::vector<const PathDiagnostic *> &Diags,
+  FilesMade *filesMade) {
+  for (std::vector<const PathDiagnostic *>::iterator it = Diags.begin(),
+       et = Diags.end(); it != et; ++it) {
+    ReportDiag(**it, filesMade);
+  }
+}
+
+void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
+                                 FilesMade *filesMade) {
+    
+  // Create the HTML directory if it is missing.
+  if (!createdDir) {
+    createdDir = true;
+    std::string ErrorMsg;
+    Directory.createDirectoryOnDisk(true, &ErrorMsg);
+
+    bool IsDirectory;
+    if (llvm::sys::fs::is_directory(Directory.str(), IsDirectory) ||
+        !IsDirectory) {
+      llvm::errs() << "warning: could not create directory '"
+                   << Directory.str() << "'\n"
+                   << "reason: " << ErrorMsg << '\n';
+
+      noDir = true;
+
+      return;
+    }
+  }
+
+  if (noDir)
+    return;
+
+  // First flatten out the entire path to make it easier to use.
+  PathPieces path = D.path.flatten(/*ShouldFlattenMacros=*/false);
+
+  // The path has already been prechecked to ensure that all parts of it come
+  // from the same file and that it is non-empty.
+  const SourceManager &SMgr = (*path.begin())->getLocation().getManager();
+  assert(!path.empty());
+  FileID FID =
+    (*path.begin())->getLocation().asLocation().getExpansionLoc().getFileID();
+  assert(!FID.isInvalid());
+
+  // Create a new rewriter to generate HTML.
+  Rewriter R(const_cast<SourceManager&>(SMgr), PP.getLangOpts());
+
+  // Process the path.
+  unsigned n = path.size();
+  unsigned max = n;
+
+  for (PathPieces::const_reverse_iterator I = path.rbegin(), 
+       E = path.rend();
+        I != E; ++I, --n)
+    HandlePiece(R, FID, **I, n, max);
+
+  // Add line numbers, header, footer, etc.
+
+  // unsigned FID = R.getSourceMgr().getMainFileID();
+  html::EscapeText(R, FID);
+  html::AddLineNumbers(R, FID);
+
+  // If we have a preprocessor, relex the file and syntax highlight.
+  // We might not have a preprocessor if we come from a deserialized AST file,
+  // for example.
+
+  html::SyntaxHighlight(R, FID, PP);
+  html::HighlightMacros(R, FID, PP);
+
+  // Get the full directory name of the analyzed file.
+
+  const FileEntry* Entry = SMgr.getFileEntryForID(FID);
+
+  // This is a kludge; basically we want to prepend the current working
+  // directory if the file's path is relative.  This is a work in progress.
+
+  std::string DirName = "";
+
+  if (llvm::sys::path::is_relative(Entry->getName())) {
+    llvm::sys::Path P = llvm::sys::Path::GetCurrentDirectory();
+    DirName = P.str() + "/";
+  }
+
+  // Add a "Bug Summary" header table (file, location, description).
+
+  {
+    std::string s;
+    llvm::raw_string_ostream os(s);
+
+    os << "<!-- REPORTHEADER -->\n"
+      << "<h3>Bug Summary</h3>\n<table class=\"simpletable\">\n"
+          "<tr><td class=\"rowname\">File:</td><td>"
+      << html::EscapeText(DirName)
+      << html::EscapeText(Entry->getName())
+      << "</td></tr>\n<tr><td class=\"rowname\">Location:</td><td>"
+         "<a href=\"#EndPath\">line "
+      << (*path.rbegin())->getLocation().asLocation().getExpansionLineNumber()
+      << ", column "
+      << (*path.rbegin())->getLocation().asLocation().getExpansionColumnNumber()
+      << "</a></td></tr>\n"
+         "<tr><td class=\"rowname\">Description:</td><td>"
+      << D.getVerboseDescription() << "</td></tr>\n";
+
+    // Output any other meta data.
+
+    for (PathDiagnostic::meta_iterator I=D.meta_begin(), E=D.meta_end();
+         I!=E; ++I) {
+      os << "<tr><td></td><td>" << html::EscapeText(*I) << "</td></tr>\n";
+    }
+
+    os << "</table>\n<!-- REPORTSUMMARYEXTRA -->\n"
+          "<h3>Annotated Source Code</h3>\n";
+
+    R.InsertTextBefore(SMgr.getLocForStartOfFile(FID), os.str());
+  }
+
+  // Embed meta-data tags.
+  {
+    std::string s;
+    llvm::raw_string_ostream os(s);
+
+    StringRef BugDesc = D.getVerboseDescription();
+    if (!BugDesc.empty())
+      os << "\n<!-- BUGDESC " << BugDesc << " -->\n";
+
+    StringRef BugType = D.getBugType();
+    if (!BugType.empty())
+      os << "\n<!-- BUGTYPE " << BugType << " -->\n";
+
+    StringRef BugCategory = D.getCategory();
+    if (!BugCategory.empty())
+      os << "\n<!-- BUGCATEGORY " << BugCategory << " -->\n";
+
+    os << "\n<!-- BUGFILE " << DirName << Entry->getName() << " -->\n";
+
+    os << "\n<!-- BUGLINE "
+       << path.back()->getLocation().asLocation().getExpansionLineNumber()
+       << " -->\n";
+
+    os << "\n<!-- BUGPATHLENGTH " << path.size() << " -->\n";
+
+    // Mark the end of the tags.
+    os << "\n<!-- BUGMETAEND -->\n";
+
+    // Insert the text.
+    R.InsertTextBefore(SMgr.getLocForStartOfFile(FID), os.str());
+  }
+
+  // Add CSS, header, and footer.
+
+  html::AddHeaderFooterInternalBuiltinCSS(R, FID, Entry->getName());
+
+  // Get the rewrite buffer.
+  const RewriteBuffer *Buf = R.getRewriteBufferFor(FID);
+
+  if (!Buf) {
+    llvm::errs() << "warning: no diagnostics generated for main file.\n";
+    return;
+  }
+
+  // Create a path for the target HTML file.
+  llvm::sys::Path F(FilePrefix);
+  F.makeUnique(false, NULL);
+
+  // Rename the file with an HTML extension.
+  llvm::sys::Path H(F);
+  H.appendSuffix("html");
+  F.renamePathOnDisk(H, NULL);
+
+  std::string ErrorMsg;
+  llvm::raw_fd_ostream os(H.c_str(), ErrorMsg);
+
+  if (!ErrorMsg.empty()) {
+    llvm::errs() << "warning: could not create file '" << F.str()
+                 << "'\n";
+    return;
+  }
+
+  if (filesMade) {
+    filesMade->addDiagnostic(D, getName(), llvm::sys::path::filename(H.str()));
+  }
+
+  // Emit the HTML to disk.
+  for (RewriteBuffer::iterator I = Buf->begin(), E = Buf->end(); I!=E; ++I)
+      os << *I;
+}
+
+void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
+                                  const PathDiagnosticPiece& P,
+                                  unsigned num, unsigned max) {
+
+  // For now, just draw a box above the line in question, and emit the
+  // warning.
+  FullSourceLoc Pos = P.getLocation().asLocation();
+
+  if (!Pos.isValid())
+    return;
+
+  SourceManager &SM = R.getSourceMgr();
+  assert(&Pos.getManager() == &SM && "SourceManagers are different!");
+  std::pair<FileID, unsigned> LPosInfo = SM.getDecomposedExpansionLoc(Pos);
+
+  if (LPosInfo.first != BugFileID)
+    return;
+
+  const llvm::MemoryBuffer *Buf = SM.getBuffer(LPosInfo.first);
+  const char* FileStart = Buf->getBufferStart();
+
+  // Compute the column number.  Rewind from the current position to the start
+  // of the line.
+  unsigned ColNo = SM.getColumnNumber(LPosInfo.first, LPosInfo.second);
+  const char *TokInstantiationPtr =Pos.getExpansionLoc().getCharacterData();
+  const char *LineStart = TokInstantiationPtr-ColNo;
+
+  // Compute LineEnd.
+  const char *LineEnd = TokInstantiationPtr;
+  const char* FileEnd = Buf->getBufferEnd();
+  while (*LineEnd != '\n' && LineEnd != FileEnd)
+    ++LineEnd;
+
+  // Compute the margin offset by counting tabs and non-tabs.
+  unsigned PosNo = 0;
+  for (const char* c = LineStart; c != TokInstantiationPtr; ++c)
+    PosNo += *c == '\t' ? 8 : 1;
+
+  // Create the html for the message.
+
+  const char *Kind = 0;
+  switch (P.getKind()) {
+  case PathDiagnosticPiece::Call:
+      llvm_unreachable("Calls should already be handled");
+  case PathDiagnosticPiece::Event:  Kind = "Event"; break;
+  case PathDiagnosticPiece::ControlFlow: Kind = "Control"; break;
+    // Setting Kind to "Control" is intentional.
+  case PathDiagnosticPiece::Macro: Kind = "Control"; break;
+  }
+
+  std::string sbuf;
+  llvm::raw_string_ostream os(sbuf);
+
+  os << "\n<tr><td class=\"num\"></td><td class=\"line\"><div id=\"";
+
+  if (num == max)
+    os << "EndPath";
+  else
+    os << "Path" << num;
+
+  os << "\" class=\"msg";
+  if (Kind)
+    os << " msg" << Kind;
+  os << "\" style=\"margin-left:" << PosNo << "ex";
+
+  // Output a maximum size.
+  if (!isa<PathDiagnosticMacroPiece>(P)) {
+    // Get the string and determine its longest whitespace-delimited token.
+    const std::string& Msg = P.getString();
+    unsigned max_token = 0;
+    unsigned cnt = 0;
+    unsigned len = Msg.size();
+
+    for (std::string::const_iterator I=Msg.begin(), E=Msg.end(); I!=E; ++I)
+      switch (*I) {
+      default:
+        ++cnt;
+        continue;
+      case ' ':
+      case '\t':
+      case '\n':
+        if (cnt > max_token) max_token = cnt;
+        cnt = 0;
+      }
+
+    if (cnt > max_token)
+      max_token = cnt;
+
+    // Determine the approximate size of the message bubble in em.
+    unsigned em;
+    const unsigned max_line = 120;
+
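+    // Worked example (hypothetical numbers): with max_token = 30 and
+    // len = 300, lines = 300/120 = 2; the loop below shrinks 'characters'
+    // from 120 until 300/characters first exceeds 2 (at characters == 100),
+    // steps back up to 101, and the bubble width becomes em = 101/2 = 50.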
+    if (max_token >= max_line)
+      em = max_token / 2;
+    else {
+      unsigned characters = max_line;
+      unsigned lines = len / max_line;
+
+      if (lines > 0) {
+        for (; characters > max_token; --characters)
+          if (len / characters > lines) {
+            ++characters;
+            break;
+          }
+      }
+
+      em = characters / 2;
+    }
+
+    if (em < max_line/2)
+      os << "; max-width:" << em << "em";
+  }
+  else
+    os << "; max-width:100em";
+
+  os << "\">";
+
+  if (max > 1) {
+    os << "<table class=\"msgT\"><tr><td valign=\"top\">";
+    os << "<div class=\"PathIndex";
+    if (Kind) os << " PathIndex" << Kind;
+    os << "\">" << num << "</div>";
+
+    if (num > 1) {
+      os << "</td><td><div class=\"PathNav\"><a href=\"#Path"
+         << (num - 1)
+         << "\" title=\"Previous event ("
+         << (num - 1)
+         << ")\">&#x2190;</a></div></td>";
+    }
+
+    os << "</td><td>";
+  }
+
+  if (const PathDiagnosticMacroPiece *MP =
+        dyn_cast<PathDiagnosticMacroPiece>(&P)) {
+
+    os << "Within the expansion of the macro '";
+
+    // Get the name of the macro by relexing it.
+    {
+      FullSourceLoc L = MP->getLocation().asLocation().getExpansionLoc();
+      assert(L.isFileID());
+      StringRef BufferInfo = L.getBufferData();
+      std::pair<FileID, unsigned> LocInfo = L.getDecomposedLoc();
+      const char* MacroName = LocInfo.second + BufferInfo.data();
+      Lexer rawLexer(SM.getLocForStartOfFile(LocInfo.first), PP.getLangOpts(),
+                     BufferInfo.begin(), MacroName, BufferInfo.end());
+
+      Token TheTok;
+      rawLexer.LexFromRawLexer(TheTok);
+      for (unsigned i = 0, n = TheTok.getLength(); i < n; ++i)
+        os << MacroName[i];
+    }
+
+    os << "':\n";
+
+    if (max > 1) {
+      os << "</td>";
+      if (num < max) {
+        os << "<td><div class=\"PathNav\"><a href=\"#";
+        if (num == max - 1)
+          os << "EndPath";
+        else
+          os << "Path" << (num + 1);
+        os << "\" title=\"Next event ("
+        << (num + 1)
+        << ")\">&#x2192;</a></div></td>";
+      }
+
+      os << "</tr></table>";
+    }
+
+    // Within a macro piece.  Write out each event.
+    ProcessMacroPiece(os, *MP, 0);
+  }
+  else {
+    os << html::EscapeText(P.getString());
+
+    if (max > 1) {
+      os << "</td>";
+      if (num < max) {
+        os << "<td><div class=\"PathNav\"><a href=\"#";
+        if (num == max - 1)
+          os << "EndPath";
+        else
+          os << "Path" << (num + 1);
+        os << "\" title=\"Next event ("
+           << (num + 1)
+           << ")\">&#x2192;</a></div></td>";
+      }
+      
+      os << "</tr></table>";
+    }
+  }
+
+  os << "</div></td></tr>";
+
+  // Insert the new html.
+  unsigned DisplayPos = LineEnd - FileStart;
+  SourceLocation Loc =
+    SM.getLocForStartOfFile(LPosInfo.first).getLocWithOffset(DisplayPos);
+
+  R.InsertTextBefore(Loc, os.str());
+
+  // Now highlight the ranges.
+  ArrayRef<SourceRange> Ranges = P.getRanges();
+  for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
+                                       E = Ranges.end(); I != E; ++I) {
+    HighlightRange(R, LPosInfo.first, *I);
+  }
+}
+
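+// Render 'n' as a lowercase alphabetic counter (base 'z' - 'a' == 25), used
+// to label the events inside a macro piece: e.g. 0 -> "a", 1 -> "b",
+// 24 -> "y", with larger values producing multi-letter strings.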
+static void EmitAlphaCounter(raw_ostream &os, unsigned n) {
+  unsigned x = n % ('z' - 'a');
+  n /= 'z' - 'a';
+
+  if (n > 0)
+    EmitAlphaCounter(os, n);
+
+  os << char('a' + x);
+}
+
+unsigned HTMLDiagnostics::ProcessMacroPiece(raw_ostream &os,
+                                            const PathDiagnosticMacroPiece& P,
+                                            unsigned num) {
+
+  for (PathPieces::const_iterator I = P.subPieces.begin(), E=P.subPieces.end();
+        I!=E; ++I) {
+
+    if (const PathDiagnosticMacroPiece *MP =
+          dyn_cast<PathDiagnosticMacroPiece>(*I)) {
+      num = ProcessMacroPiece(os, *MP, num);
+      continue;
+    }
+
+    if (PathDiagnosticEventPiece *EP = dyn_cast<PathDiagnosticEventPiece>(*I)) {
+      os << "<div class=\"msg msgEvent\" style=\"width:94%; "
+            "margin-left:5px\">"
+            "<table class=\"msgT\"><tr>"
+            "<td valign=\"top\"><div class=\"PathIndex PathIndexEvent\">";
+      EmitAlphaCounter(os, num++);
+      os << "</div></td><td valign=\"top\">"
+         << html::EscapeText(EP->getString())
+         << "</td></tr></table></div>\n";
+    }
+  }
+
+  return num;
+}
+
+void HTMLDiagnostics::HighlightRange(Rewriter& R, FileID BugFileID,
+                                     SourceRange Range,
+                                     const char *HighlightStart,
+                                     const char *HighlightEnd) {
+  SourceManager &SM = R.getSourceMgr();
+  const LangOptions &LangOpts = R.getLangOpts();
+
+  SourceLocation InstantiationStart = SM.getExpansionLoc(Range.getBegin());
+  unsigned StartLineNo = SM.getExpansionLineNumber(InstantiationStart);
+
+  SourceLocation InstantiationEnd = SM.getExpansionLoc(Range.getEnd());
+  unsigned EndLineNo = SM.getExpansionLineNumber(InstantiationEnd);
+
+  if (EndLineNo < StartLineNo)
+    return;
+
+  if (SM.getFileID(InstantiationStart) != BugFileID ||
+      SM.getFileID(InstantiationEnd) != BugFileID)
+    return;
+
+  // Compute the column number of the end.
+  unsigned EndColNo = SM.getExpansionColumnNumber(InstantiationEnd);
+  unsigned OldEndColNo = EndColNo;
+
+  if (EndColNo) {
+    // Add in the length of the token, so that we cover multi-char tokens.
+    EndColNo += Lexer::MeasureTokenLength(Range.getEnd(), SM, LangOpts)-1;
+  }
+
+  // Highlight the range.  Make the span tag the outermost tag for the
+  // selected range.
+
+  SourceLocation E =
+    InstantiationEnd.getLocWithOffset(EndColNo - OldEndColNo);
+
+  html::HighlightRange(R, InstantiationStart, E, HighlightStart, HighlightEnd);
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/Makefile b/safecode/tools/clang/lib/StaticAnalyzer/Core/Makefile
new file mode 100644
index 0000000..4aebc16
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/Makefile
@@ -0,0 +1,17 @@
+##===- clang/lib/StaticAnalyzer/Core/Makefile --------------*- Makefile -*-===##
+# 
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+# 
+##===----------------------------------------------------------------------===##
+#
+# This implements analyses built on top of source-level CFGs. 
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../../..
+LIBRARYNAME := clangStaticAnalyzerCore
+
+include $(CLANG_LEVEL)/Makefile
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
new file mode 100644
index 0000000..42073d4
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -0,0 +1,1437 @@
+//== MemRegion.cpp - Abstract memory regions for static analysis --*- C++ -*--//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines MemRegion and its subclasses.  MemRegion defines a
+//  partially-typed abstraction of memory useful for path-sensitive dataflow
+//  analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/Support/BumpVector.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// MemRegion Construction.
+//===----------------------------------------------------------------------===//
+
+template<typename RegionTy> struct MemRegionManagerTrait;
+
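+// Regions are uniqued: profile the constructor arguments into a FoldingSet
+// ID, look for an existing node, and only allocate (from the bump-pointer
+// allocator) and insert a new region when no identical one exists yet.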
+template <typename RegionTy, typename A1>
+RegionTy* MemRegionManager::getRegion(const A1 a1) {
+
+  const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion =
+  MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1);
+
+  llvm::FoldingSetNodeID ID;
+  RegionTy::ProfileRegion(ID, a1, superRegion);
+  void *InsertPos;
+  RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+                                                                   InsertPos));
+
+  if (!R) {
+    R = (RegionTy*) A.Allocate<RegionTy>();
+    new (R) RegionTy(a1, superRegion);
+    Regions.InsertNode(R, InsertPos);
+  }
+
+  return R;
+}
+
+template <typename RegionTy, typename A1>
+RegionTy* MemRegionManager::getSubRegion(const A1 a1,
+                                         const MemRegion *superRegion) {
+  llvm::FoldingSetNodeID ID;
+  RegionTy::ProfileRegion(ID, a1, superRegion);
+  void *InsertPos;
+  RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+                                                                   InsertPos));
+
+  if (!R) {
+    R = (RegionTy*) A.Allocate<RegionTy>();
+    new (R) RegionTy(a1, superRegion);
+    Regions.InsertNode(R, InsertPos);
+  }
+
+  return R;
+}
+
+template <typename RegionTy, typename A1, typename A2>
+RegionTy* MemRegionManager::getRegion(const A1 a1, const A2 a2) {
+
+  const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion =
+  MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1, a2);
+
+  llvm::FoldingSetNodeID ID;
+  RegionTy::ProfileRegion(ID, a1, a2, superRegion);
+  void *InsertPos;
+  RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+                                                                   InsertPos));
+
+  if (!R) {
+    R = (RegionTy*) A.Allocate<RegionTy>();
+    new (R) RegionTy(a1, a2, superRegion);
+    Regions.InsertNode(R, InsertPos);
+  }
+
+  return R;
+}
+
+template <typename RegionTy, typename A1, typename A2>
+RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2,
+                                         const MemRegion *superRegion) {
+
+  llvm::FoldingSetNodeID ID;
+  RegionTy::ProfileRegion(ID, a1, a2, superRegion);
+  void *InsertPos;
+  RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+                                                                   InsertPos));
+
+  if (!R) {
+    R = (RegionTy*) A.Allocate<RegionTy>();
+    new (R) RegionTy(a1, a2, superRegion);
+    Regions.InsertNode(R, InsertPos);
+  }
+
+  return R;
+}
+
+template <typename RegionTy, typename A1, typename A2, typename A3>
+RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2, const A3 a3,
+                                         const MemRegion *superRegion) {
+
+  llvm::FoldingSetNodeID ID;
+  RegionTy::ProfileRegion(ID, a1, a2, a3, superRegion);
+  void *InsertPos;
+  RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+                                                                   InsertPos));
+
+  if (!R) {
+    R = (RegionTy*) A.Allocate<RegionTy>();
+    new (R) RegionTy(a1, a2, a3, superRegion);
+    Regions.InsertNode(R, InsertPos);
+  }
+
+  return R;
+}
+
+//===----------------------------------------------------------------------===//
+// Object destruction.
+//===----------------------------------------------------------------------===//
+
+MemRegion::~MemRegion() {}
+
+MemRegionManager::~MemRegionManager() {
+  // All regions and their data are BumpPtrAllocated.  No need to call
+  // their destructors.
+}
+
+//===----------------------------------------------------------------------===//
+// Basic methods.
+//===----------------------------------------------------------------------===//
+
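+// Walk the super-region chain of this subregion and report whether 'R'
+// appears anywhere on it.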
+bool SubRegion::isSubRegionOf(const MemRegion* R) const {
+  const MemRegion* r = getSuperRegion();
+  while (r != 0) {
+    if (r == R)
+      return true;
+    if (const SubRegion* sr = dyn_cast<SubRegion>(r))
+      r = sr->getSuperRegion();
+    else
+      break;
+  }
+  return false;
+}
+
+MemRegionManager* SubRegion::getMemRegionManager() const {
+  const SubRegion* r = this;
+  do {
+    const MemRegion *superRegion = r->getSuperRegion();
+    if (const SubRegion *sr = dyn_cast<SubRegion>(superRegion)) {
+      r = sr;
+      continue;
+    }
+    return superRegion->getMemRegionManager();
+  } while (1);
+}
+
+const StackFrameContext *VarRegion::getStackFrame() const {
+  const StackSpaceRegion *SSR = dyn_cast<StackSpaceRegion>(getMemorySpace());
+  return SSR ? SSR->getStackFrame() : NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// Region extents.
+//===----------------------------------------------------------------------===//
+
+DefinedOrUnknownSVal TypedValueRegion::getExtent(SValBuilder &svalBuilder) const {
+  ASTContext &Ctx = svalBuilder.getContext();
+  QualType T = getDesugaredValueType(Ctx);
+
+  if (isa<VariableArrayType>(T))
+    return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
+  if (isa<IncompleteArrayType>(T))
+    return UnknownVal();
+
+  CharUnits size = Ctx.getTypeSizeInChars(T);
+  QualType sizeTy = svalBuilder.getArrayIndexType();
+  return svalBuilder.makeIntVal(size.getQuantity(), sizeTy);
+}
+
+DefinedOrUnknownSVal FieldRegion::getExtent(SValBuilder &svalBuilder) const {
+  // Force callers to deal with bitfields explicitly.
+  if (getDecl()->isBitField())
+    return UnknownVal();
+
+  DefinedOrUnknownSVal Extent = DeclRegion::getExtent(svalBuilder);
+
+  // A zero-length array at the end of a struct often stands for dynamically-
+  // allocated extra memory.
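+  // (Illustrative example: struct S { unsigned Len; char Data[0]; };)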
+  if (Extent.isZeroConstant()) {
+    QualType T = getDesugaredValueType(svalBuilder.getContext());
+
+    if (isa<ConstantArrayType>(T))
+      return UnknownVal();
+  }
+
+  return Extent;
+}
+
+DefinedOrUnknownSVal AllocaRegion::getExtent(SValBuilder &svalBuilder) const {
+  return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
+}
+
+DefinedOrUnknownSVal SymbolicRegion::getExtent(SValBuilder &svalBuilder) const {
+  return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
+}
+
+DefinedOrUnknownSVal StringRegion::getExtent(SValBuilder &svalBuilder) const {
+  return svalBuilder.makeIntVal(getStringLiteral()->getByteLength()+1,
+                                svalBuilder.getArrayIndexType());
+}
+
+ObjCIvarRegion::ObjCIvarRegion(const ObjCIvarDecl *ivd, const MemRegion* sReg)
+  : DeclRegion(ivd, sReg, ObjCIvarRegionKind) {}
+
+const ObjCIvarDecl *ObjCIvarRegion::getDecl() const {
+  return cast<ObjCIvarDecl>(D);
+}
+
+QualType ObjCIvarRegion::getValueType() const {
+  return getDecl()->getType();
+}
+
+QualType CXXBaseObjectRegion::getValueType() const {
+  return QualType(getDecl()->getTypeForDecl(), 0);
+}
+
+//===----------------------------------------------------------------------===//
+// FoldingSet profiling.
+//===----------------------------------------------------------------------===//
+
+void MemSpaceRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+  ID.AddInteger((unsigned)getKind());
+}
+
+void StackSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+  ID.AddInteger((unsigned)getKind());
+  ID.AddPointer(getStackFrame());
+}
+
+void StaticGlobalSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+  ID.AddInteger((unsigned)getKind());
+  ID.AddPointer(getCodeRegion());
+}
+
+void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+                                 const StringLiteral* Str,
+                                 const MemRegion* superRegion) {
+  ID.AddInteger((unsigned) StringRegionKind);
+  ID.AddPointer(Str);
+  ID.AddPointer(superRegion);
+}
+
+void ObjCStringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+                                     const ObjCStringLiteral* Str,
+                                     const MemRegion* superRegion) {
+  ID.AddInteger((unsigned) ObjCStringRegionKind);
+  ID.AddPointer(Str);
+  ID.AddPointer(superRegion);
+}
+
+void AllocaRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+                                 const Expr *Ex, unsigned cnt,
+                                 const MemRegion *superRegion) {
+  ID.AddInteger((unsigned) AllocaRegionKind);
+  ID.AddPointer(Ex);
+  ID.AddInteger(cnt);
+  ID.AddPointer(superRegion);
+}
+
+void AllocaRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+  ProfileRegion(ID, Ex, Cnt, superRegion);
+}
+
+void CompoundLiteralRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+  CompoundLiteralRegion::ProfileRegion(ID, CL, superRegion);
+}
+
+void CompoundLiteralRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+                                          const CompoundLiteralExpr *CL,
+                                          const MemRegion* superRegion) {
+  ID.AddInteger((unsigned) CompoundLiteralRegionKind);
+  ID.AddPointer(CL);
+  ID.AddPointer(superRegion);
+}
+
+void CXXThisRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+                                  const PointerType *PT,
+                                  const MemRegion *sRegion) {
+  ID.AddInteger((unsigned) CXXThisRegionKind);
+  ID.AddPointer(PT);
+  ID.AddPointer(sRegion);
+}
+
+void CXXThisRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+  CXXThisRegion::ProfileRegion(ID, ThisPointerTy, superRegion);
+}
+
+void ObjCIvarRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+                                   const ObjCIvarDecl *ivd,
+                                   const MemRegion* superRegion) {
+  DeclRegion::ProfileRegion(ID, ivd, superRegion, ObjCIvarRegionKind);
+}
+
+void DeclRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const Decl *D,
+                               const MemRegion* superRegion, Kind k) {
+  ID.AddInteger((unsigned) k);
+  ID.AddPointer(D);
+  ID.AddPointer(superRegion);
+}
+
+void DeclRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+  DeclRegion::ProfileRegion(ID, D, superRegion, getKind());
+}
+
+void VarRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+  VarRegion::ProfileRegion(ID, getDecl(), superRegion);
+}
+
+void SymbolicRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, SymbolRef sym,
+                                   const MemRegion *sreg) {
+  ID.AddInteger((unsigned) MemRegion::SymbolicRegionKind);
+  ID.Add(sym);
+  ID.AddPointer(sreg);
+}
+
+void SymbolicRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+  SymbolicRegion::ProfileRegion(ID, sym, getSuperRegion());
+}
+
+void ElementRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+                                  QualType ElementType, SVal Idx,
+                                  const MemRegion* superRegion) {
+  ID.AddInteger(MemRegion::ElementRegionKind);
+  ID.Add(ElementType);
+  ID.AddPointer(superRegion);
+  Idx.Profile(ID);
+}
+
+void ElementRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+  ElementRegion::ProfileRegion(ID, ElementType, Index, superRegion);
+}
+
+void FunctionTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+                                       const NamedDecl *FD,
+                                       const MemRegion*) {
+  ID.AddInteger(MemRegion::FunctionTextRegionKind);
+  ID.AddPointer(FD);
+}
+
+void FunctionTextRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+  FunctionTextRegion::ProfileRegion(ID, FD, superRegion);
+}
+
+void BlockTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+                                    const BlockDecl *BD, CanQualType,
+                                    const AnalysisDeclContext *AC,
+                                    const MemRegion*) {
+  ID.AddInteger(MemRegion::BlockTextRegionKind);
+  ID.AddPointer(BD);
+}
+
+void BlockTextRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+  BlockTextRegion::ProfileRegion(ID, BD, locTy, AC, superRegion);
+}
+
+void BlockDataRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+                                    const BlockTextRegion *BC,
+                                    const LocationContext *LC,
+                                    const MemRegion *sReg) {
+  ID.AddInteger(MemRegion::BlockDataRegionKind);
+  ID.AddPointer(BC);
+  ID.AddPointer(LC);
+  ID.AddPointer(sReg);
+}
+
+void BlockDataRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+  BlockDataRegion::ProfileRegion(ID, BC, LC, getSuperRegion());
+}
+
+void CXXTempObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+                                        Expr const *Ex,
+                                        const MemRegion *sReg) {
+  ID.AddPointer(Ex);
+  ID.AddPointer(sReg);
+}
+
+void CXXTempObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+  ProfileRegion(ID, Ex, getSuperRegion());
+}
+
+void CXXBaseObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+                                        const CXXRecordDecl *RD,
+                                        bool IsVirtual,
+                                        const MemRegion *SReg) {
+  ID.AddPointer(RD);
+  ID.AddBoolean(IsVirtual);
+  ID.AddPointer(SReg);
+}
+
+void CXXBaseObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+  ProfileRegion(ID, getDecl(), isVirtual(), superRegion);
+}
+
+//===----------------------------------------------------------------------===//
+// Region anchors.
+//===----------------------------------------------------------------------===//
+
+void GlobalsSpaceRegion::anchor() { }
+void HeapSpaceRegion::anchor() { }
+void UnknownSpaceRegion::anchor() { }
+void StackLocalsSpaceRegion::anchor() { }
+void StackArgumentsSpaceRegion::anchor() { }
+void TypedRegion::anchor() { }
+void TypedValueRegion::anchor() { }
+void CodeTextRegion::anchor() { }
+void SubRegion::anchor() { }
+
+//===----------------------------------------------------------------------===//
+// Region pretty-printing.
+//===----------------------------------------------------------------------===//
+
+void MemRegion::dump() const {
+  dumpToStream(llvm::errs());
+}
+
+std::string MemRegion::getString() const {
+  std::string s;
+  llvm::raw_string_ostream os(s);
+  dumpToStream(os);
+  return os.str();
+}
+
+void MemRegion::dumpToStream(raw_ostream &os) const {
+  os << "<Unknown Region>";
+}
+
+void AllocaRegion::dumpToStream(raw_ostream &os) const {
+  os << "alloca{" << (const void*) Ex << ',' << Cnt << '}';
+}
+
+void FunctionTextRegion::dumpToStream(raw_ostream &os) const {
+  os << "code{" << getDecl()->getDeclName().getAsString() << '}';
+}
+
+void BlockTextRegion::dumpToStream(raw_ostream &os) const {
+  os << "block_code{" << (const void*) this << '}';
+}
+
+void BlockDataRegion::dumpToStream(raw_ostream &os) const {
+  os << "block_data{" << BC << '}';
+}
+
+void CompoundLiteralRegion::dumpToStream(raw_ostream &os) const {
+  // FIXME: More elaborate pretty-printing.
+  os << "{ " << (const void*) CL <<  " }";
+}
+
+void CXXTempObjectRegion::dumpToStream(raw_ostream &os) const {
+  os << "temp_object{" << getValueType().getAsString() << ','
+     << (const void*) Ex << '}';
+}
+
+void CXXBaseObjectRegion::dumpToStream(raw_ostream &os) const {
+  os << "base{" << superRegion << ',' << getDecl()->getName() << '}';
+}
+
+void CXXThisRegion::dumpToStream(raw_ostream &os) const {
+  os << "this";
+}
+
+void ElementRegion::dumpToStream(raw_ostream &os) const {
+  os << "element{" << superRegion << ','
+     << Index << ',' << getElementType().getAsString() << '}';
+}
+
+void FieldRegion::dumpToStream(raw_ostream &os) const {
+  os << superRegion << "->" << *getDecl();
+}
+
+void ObjCIvarRegion::dumpToStream(raw_ostream &os) const {
+  os << "ivar{" << superRegion << ',' << *getDecl() << '}';
+}
+
+void StringRegion::dumpToStream(raw_ostream &os) const {
+  Str->printPretty(os, 0, PrintingPolicy(getContext().getLangOpts()));
+}
+
+void ObjCStringRegion::dumpToStream(raw_ostream &os) const {
+  Str->printPretty(os, 0, PrintingPolicy(getContext().getLangOpts()));
+}
+
+void SymbolicRegion::dumpToStream(raw_ostream &os) const {
+  os << "SymRegion{" << sym << '}';
+}
+
+void VarRegion::dumpToStream(raw_ostream &os) const {
+  os << *cast<VarDecl>(D);
+}
+
+void RegionRawOffset::dump() const {
+  dumpToStream(llvm::errs());
+}
+
+void RegionRawOffset::dumpToStream(raw_ostream &os) const {
+  os << "raw_offset{" << getRegion() << ',' << getOffset().getQuantity() << '}';
+}
+
+void StaticGlobalSpaceRegion::dumpToStream(raw_ostream &os) const {
+  os << "StaticGlobalsMemSpace{" << CR << '}';
+}
+
+void GlobalInternalSpaceRegion::dumpToStream(raw_ostream &os) const {
+  os << "GlobalInternalSpaceRegion";
+}
+
+void GlobalSystemSpaceRegion::dumpToStream(raw_ostream &os) const {
+  os << "GlobalSystemSpaceRegion";
+}
+
+void GlobalImmutableSpaceRegion::dumpToStream(raw_ostream &os) const {
+  os << "GlobalImmutableSpaceRegion";
+}
+
+void HeapSpaceRegion::dumpToStream(raw_ostream &os) const {
+  os << "HeapSpaceRegion";
+}
+
+void UnknownSpaceRegion::dumpToStream(raw_ostream &os) const {
+  os << "UnknownSpaceRegion";
+}
+
+void StackArgumentsSpaceRegion::dumpToStream(raw_ostream &os) const {
+  os << "StackArgumentsSpaceRegion";
+}
+
+void StackLocalsSpaceRegion::dumpToStream(raw_ostream &os) const {
+  os << "StackLocalsSpaceRegion";
+}
+
+bool MemRegion::canPrintPretty() const {
+  return canPrintPrettyAsExpr();
+}
+
+bool MemRegion::canPrintPrettyAsExpr() const {
+  return false;
+}
+
+void MemRegion::printPretty(raw_ostream &os) const {
+  assert(canPrintPretty() && "This region cannot be printed pretty.");
+  os << "'";
+  printPrettyAsExpr(os);
+  os << "'";
+  return;
+}
+
+void MemRegion::printPrettyAsExpr(raw_ostream &os) const {
+  llvm_unreachable("This region cannot be printed pretty.");
+}
+
+bool VarRegion::canPrintPrettyAsExpr() const {
+  return true;
+}
+
+void VarRegion::printPrettyAsExpr(raw_ostream &os) const {
+  os << getDecl()->getName();
+}
+
+bool ObjCIvarRegion::canPrintPrettyAsExpr() const {
+  return true;
+}
+
+void ObjCIvarRegion::printPrettyAsExpr(raw_ostream &os) const {
+  os << getDecl()->getName();
+}
+
+bool FieldRegion::canPrintPretty() const {
+  return true;
+}
+
+bool FieldRegion::canPrintPrettyAsExpr() const {
+  return superRegion->canPrintPrettyAsExpr();
+}
+
+void FieldRegion::printPrettyAsExpr(raw_ostream &os) const {
+  assert(canPrintPrettyAsExpr());
+  superRegion->printPrettyAsExpr(os);
+  os << "." << getDecl()->getName();
+}
+
+void FieldRegion::printPretty(raw_ostream &os) const {
+  if (canPrintPrettyAsExpr()) {
+    os << "'";
+    printPrettyAsExpr(os);
+    os << "'";
+  } else {
+    os << "field '" << getDecl()->getName() << "'";
+  }
+}
+
+bool CXXBaseObjectRegion::canPrintPrettyAsExpr() const {
+  return superRegion->canPrintPrettyAsExpr();
+}
+
+void CXXBaseObjectRegion::printPrettyAsExpr(raw_ostream &os) const {
+  superRegion->printPrettyAsExpr(os);
+}
+
+//===----------------------------------------------------------------------===//
+// MemRegionManager methods.
+//===----------------------------------------------------------------------===//
+
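+// The two LazyAllocate overloads below construct the singleton memory-space
+// regions on first use, placement-new'ing them into the manager's allocator
+// so they live for the lifetime of the MemRegionManager.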
+template <typename REG>
+const REG *MemRegionManager::LazyAllocate(REG*& region) {
+  if (!region) {
+    region = (REG*) A.Allocate<REG>();
+    new (region) REG(this);
+  }
+
+  return region;
+}
+
+template <typename REG, typename ARG>
+const REG *MemRegionManager::LazyAllocate(REG*& region, ARG a) {
+  if (!region) {
+    region = (REG*) A.Allocate<REG>();
+    new (region) REG(this, a);
+  }
+
+  return region;
+}
+
+const StackLocalsSpaceRegion*
+MemRegionManager::getStackLocalsRegion(const StackFrameContext *STC) {
+  assert(STC);
+  StackLocalsSpaceRegion *&R = StackLocalsSpaceRegions[STC];
+
+  if (R)
+    return R;
+
+  R = A.Allocate<StackLocalsSpaceRegion>();
+  new (R) StackLocalsSpaceRegion(this, STC);
+  return R;
+}
+
+const StackArgumentsSpaceRegion *
+MemRegionManager::getStackArgumentsRegion(const StackFrameContext *STC) {
+  assert(STC);
+  StackArgumentsSpaceRegion *&R = StackArgumentsSpaceRegions[STC];
+
+  if (R)
+    return R;
+
+  R = A.Allocate<StackArgumentsSpaceRegion>();
+  new (R) StackArgumentsSpaceRegion(this, STC);
+  return R;
+}
+
+const GlobalsSpaceRegion
+*MemRegionManager::getGlobalsRegion(MemRegion::Kind K,
+                                    const CodeTextRegion *CR) {
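+  // With no associated CodeTextRegion, hand back one of the lazily-created
+  // singleton global spaces; otherwise the request is for the static-globals
+  // space tied to that function or block.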
+  if (!CR) {
+    if (K == MemRegion::GlobalSystemSpaceRegionKind)
+      return LazyAllocate(SystemGlobals);
+    if (K == MemRegion::GlobalImmutableSpaceRegionKind)
+      return LazyAllocate(ImmutableGlobals);
+    assert(K == MemRegion::GlobalInternalSpaceRegionKind);
+    return LazyAllocate(InternalGlobals);
+  }
+
+  assert(K == MemRegion::StaticGlobalSpaceRegionKind);
+  StaticGlobalSpaceRegion *&R = StaticsGlobalSpaceRegions[CR];
+  if (R)
+    return R;
+
+  R = A.Allocate<StaticGlobalSpaceRegion>();
+  new (R) StaticGlobalSpaceRegion(this, CR);
+  return R;
+}
+
+const HeapSpaceRegion *MemRegionManager::getHeapRegion() {
+  return LazyAllocate(heap);
+}
+
+const MemSpaceRegion *MemRegionManager::getUnknownRegion() {
+  return LazyAllocate(unknown);
+}
+
+const MemSpaceRegion *MemRegionManager::getCodeRegion() {
+  return LazyAllocate(code);
+}
+
+//===----------------------------------------------------------------------===//
+// Constructing regions.
+//===----------------------------------------------------------------------===//
+const StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str){
+  return getSubRegion<StringRegion>(Str, getGlobalsRegion());
+}
+
+const ObjCStringRegion *
+MemRegionManager::getObjCStringRegion(const ObjCStringLiteral* Str){
+  return getSubRegion<ObjCStringRegion>(Str, getGlobalsRegion());
+}
+
+/// Look through a chain of LocationContexts to either find the
+/// StackFrameContext that matches a DeclContext, or find a VarRegion
+/// for a variable captured by a block.
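+/// If neither is found, a null StackFrameContext pointer is returned.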
+static llvm::PointerUnion<const StackFrameContext *, const VarRegion *>
+getStackOrCaptureRegionForDeclContext(const LocationContext *LC,
+                                      const DeclContext *DC,
+                                      const VarDecl *VD) {
+  while (LC) {
+    if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LC)) {
+      if (cast<DeclContext>(SFC->getDecl()) == DC)
+        return SFC;
+    }
+    if (const BlockInvocationContext *BC =
+        dyn_cast<BlockInvocationContext>(LC)) {
+      const BlockDataRegion *BR =
+        static_cast<const BlockDataRegion*>(BC->getContextData());
+      // FIXME: This can be made more efficient.
+      for (BlockDataRegion::referenced_vars_iterator
+           I = BR->referenced_vars_begin(),
+           E = BR->referenced_vars_end(); I != E; ++I) {
+        if (const VarRegion *VR = dyn_cast<VarRegion>(I.getOriginalRegion()))
+          if (VR->getDecl() == VD)
+            return cast<VarRegion>(I.getCapturedRegion());
+      }
+    }
+    
+    LC = LC->getParent();
+  }
+  return (const StackFrameContext*)0;
+}
+
+const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
+                                                const LocationContext *LC) {
+  const MemRegion *sReg = 0;
+
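+  // Decide which memory space (the "super region") the variable lives in;
+  // the VarRegion itself is created as a subregion of that space at the end.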
+  if (D->hasGlobalStorage() && !D->isStaticLocal()) {
+
+    // First handle the globals defined in system headers.
+    if (C.getSourceManager().isInSystemHeader(D->getLocation())) {
+      // Whitelist the system globals that often do get modified; assume the
+      // rest are immutable.
+      if (D->getName().find("errno") != StringRef::npos)
+        sReg = getGlobalsRegion(MemRegion::GlobalSystemSpaceRegionKind);
+      else
+        sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
+
+    // Treat other globals as GlobalInternal unless they are constants.
+    } else {
+      QualType GQT = D->getType();
+      const Type *GT = GQT.getTypePtrOrNull();
+      // TODO: We could walk the complex types here and see if everything is
+      // constified.
+      if (GT && GQT.isConstQualified() && GT->isArithmeticType())
+        sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
+      else
+        sReg = getGlobalsRegion();
+    }
+  
+  // Finally handle static locals.  
+  } else {
+    // FIXME: Once we implement scope handling, we will need to properly look up
+    // 'D' in the proper LocationContext.
+    const DeclContext *DC = D->getDeclContext();
+    llvm::PointerUnion<const StackFrameContext *, const VarRegion *> V =
+      getStackOrCaptureRegionForDeclContext(LC, DC, D);
+    
+    if (V.is<const VarRegion*>())
+      return V.get<const VarRegion*>();
+    
+    const StackFrameContext *STC = V.get<const StackFrameContext*>();
+
+    if (!STC)
+      sReg = getUnknownRegion();
+    else {
+      if (D->hasLocalStorage()) {
+        sReg = isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)
+               ? static_cast<const MemRegion*>(getStackArgumentsRegion(STC))
+               : static_cast<const MemRegion*>(getStackLocalsRegion(STC));
+      }
+      else {
+        assert(D->isStaticLocal());
+        const Decl *STCD = STC->getDecl();
+        if (isa<FunctionDecl>(STCD) || isa<ObjCMethodDecl>(STCD))
+          sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
+                                  getFunctionTextRegion(cast<NamedDecl>(STCD)));
+        else if (const BlockDecl *BD = dyn_cast<BlockDecl>(STCD)) {
+          const BlockTextRegion *BTR =
+            getBlockTextRegion(BD,
+                     C.getCanonicalType(BD->getSignatureAsWritten()->getType()),
+                     STC->getAnalysisDeclContext());
+          sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
+                                  BTR);
+        }
+        else {
+          sReg = getGlobalsRegion();
+        }
+      }
+    }
+  }
+
+  return getSubRegion<VarRegion>(D, sReg);
+}
+
+const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
+                                                const MemRegion *superR) {
+  return getSubRegion<VarRegion>(D, superR);
+}
+
+const BlockDataRegion *
+MemRegionManager::getBlockDataRegion(const BlockTextRegion *BC,
+                                     const LocationContext *LC) {
+  const MemRegion *sReg = 0;
+  const BlockDecl *BD = BC->getDecl();
+  if (!BD->hasCaptures()) {
+    // This handles 'static' blocks.
+    sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
+  }
+  else {
+    if (LC) {
+      // FIXME: Once we implement scope handling, we want the parent region
+      // to be the scope.
+      const StackFrameContext *STC = LC->getCurrentStackFrame();
+      assert(STC);
+      sReg = getStackLocalsRegion(STC);
+    }
+    else {
+      // We allow 'LC' to be NULL for cases where we want BlockDataRegions
+      // without context-sensitivity.
+      sReg = getUnknownRegion();
+    }
+  }
+
+  return getSubRegion<BlockDataRegion>(BC, LC, sReg);
+}
+
+const CompoundLiteralRegion*
+MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr *CL,
+                                           const LocationContext *LC) {
+
+  const MemRegion *sReg = 0;
+
+  if (CL->isFileScope())
+    sReg = getGlobalsRegion();
+  else {
+    const StackFrameContext *STC = LC->getCurrentStackFrame();
+    assert(STC);
+    sReg = getStackLocalsRegion(STC);
+  }
+
+  return getSubRegion<CompoundLiteralRegion>(CL, sReg);
+}
+
+const ElementRegion*
+MemRegionManager::getElementRegion(QualType elementType, NonLoc Idx,
+                                   const MemRegion* superRegion,
+                                   ASTContext &Ctx){
+
+  QualType T = Ctx.getCanonicalType(elementType).getUnqualifiedType();
+
+  llvm::FoldingSetNodeID ID;
+  ElementRegion::ProfileRegion(ID, T, Idx, superRegion);
+
+  void *InsertPos;
+  MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+  ElementRegion* R = cast_or_null<ElementRegion>(data);
+
+  if (!R) {
+    R = (ElementRegion*) A.Allocate<ElementRegion>();
+    new (R) ElementRegion(T, Idx, superRegion);
+    Regions.InsertNode(R, InsertPos);
+  }
+
+  return R;
+}
+
+const FunctionTextRegion *
+MemRegionManager::getFunctionTextRegion(const NamedDecl *FD) {
+  return getSubRegion<FunctionTextRegion>(FD, getCodeRegion());
+}
+
+const BlockTextRegion *
+MemRegionManager::getBlockTextRegion(const BlockDecl *BD, CanQualType locTy,
+                                     AnalysisDeclContext *AC) {
+  return getSubRegion<BlockTextRegion>(BD, locTy, AC, getCodeRegion());
+}
+
+
+/// getSymbolicRegion - Retrieve or create a "symbolic" memory region.
+const SymbolicRegion *MemRegionManager::getSymbolicRegion(SymbolRef sym) {
+  return getSubRegion<SymbolicRegion>(sym, getUnknownRegion());
+}
+
+const SymbolicRegion *MemRegionManager::getSymbolicHeapRegion(SymbolRef Sym) {
+  return getSubRegion<SymbolicRegion>(Sym, getHeapRegion());
+}
+
+const FieldRegion*
+MemRegionManager::getFieldRegion(const FieldDecl *d,
+                                 const MemRegion* superRegion){
+  return getSubRegion<FieldRegion>(d, superRegion);
+}
+
+const ObjCIvarRegion*
+MemRegionManager::getObjCIvarRegion(const ObjCIvarDecl *d,
+                                    const MemRegion* superRegion) {
+  return getSubRegion<ObjCIvarRegion>(d, superRegion);
+}
+
+const CXXTempObjectRegion*
+MemRegionManager::getCXXTempObjectRegion(Expr const *E,
+                                         LocationContext const *LC) {
+  const StackFrameContext *SFC = LC->getCurrentStackFrame();
+  assert(SFC);
+  return getSubRegion<CXXTempObjectRegion>(E, getStackLocalsRegion(SFC));
+}
+
+/// Checks whether \p BaseClass is a valid virtual or direct non-virtual base
+/// class of the type of \p Super.
+static bool isValidBaseClass(const CXXRecordDecl *BaseClass,
+                             const TypedValueRegion *Super,
+                             bool IsVirtual) {
+  BaseClass = BaseClass->getCanonicalDecl();
+
+  const CXXRecordDecl *Class = Super->getValueType()->getAsCXXRecordDecl();
+  if (!Class)
+    return true;
+
+  if (IsVirtual)
+    return Class->isVirtuallyDerivedFrom(BaseClass);
+
+  for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
+                                                E = Class->bases_end();
+       I != E; ++I) {
+    if (I->getType()->getAsCXXRecordDecl()->getCanonicalDecl() == BaseClass)
+      return true;
+  }
+
+  return false;
+}
+
+const CXXBaseObjectRegion *
+MemRegionManager::getCXXBaseObjectRegion(const CXXRecordDecl *RD,
+                                         const MemRegion *Super,
+                                         bool IsVirtual) {
+  if (isa<TypedValueRegion>(Super)) {
+    assert(isValidBaseClass(RD, dyn_cast<TypedValueRegion>(Super), IsVirtual));
+    (void)isValidBaseClass;
+
+    if (IsVirtual) {
+      // Virtual base regions should not be layered, since the layout rules
+      // are different.
+      while (const CXXBaseObjectRegion *Base =
+               dyn_cast<CXXBaseObjectRegion>(Super)) {
+        Super = Base->getSuperRegion();
+      }
+      assert(Super && !isa<MemSpaceRegion>(Super));
+    }
+  }
+
+  return getSubRegion<CXXBaseObjectRegion>(RD, IsVirtual, Super);
+}
+
+const CXXThisRegion*
+MemRegionManager::getCXXThisRegion(QualType thisPointerTy,
+                                   const LocationContext *LC) {
+  const StackFrameContext *STC = LC->getCurrentStackFrame();
+  assert(STC);
+  const PointerType *PT = thisPointerTy->getAs<PointerType>();
+  assert(PT);
+  return getSubRegion<CXXThisRegion>(PT, getStackArgumentsRegion(STC));
+}
+
+const AllocaRegion*
+MemRegionManager::getAllocaRegion(const Expr *E, unsigned cnt,
+                                  const LocationContext *LC) {
+  const StackFrameContext *STC = LC->getCurrentStackFrame();
+  assert(STC);
+  return getSubRegion<AllocaRegion>(E, cnt, getStackLocalsRegion(STC));
+}
+
+const MemSpaceRegion *MemRegion::getMemorySpace() const {
+  const MemRegion *R = this;
+  const SubRegion* SR = dyn_cast<SubRegion>(this);
+
+  while (SR) {
+    R = SR->getSuperRegion();
+    SR = dyn_cast<SubRegion>(R);
+  }
+
+  return dyn_cast<MemSpaceRegion>(R);
+}
+
+bool MemRegion::hasStackStorage() const {
+  return isa<StackSpaceRegion>(getMemorySpace());
+}
+
+bool MemRegion::hasStackNonParametersStorage() const {
+  return isa<StackLocalsSpaceRegion>(getMemorySpace());
+}
+
+bool MemRegion::hasStackParametersStorage() const {
+  return isa<StackArgumentsSpaceRegion>(getMemorySpace());
+}
+
+bool MemRegion::hasGlobalsOrParametersStorage() const {
+  const MemSpaceRegion *MS = getMemorySpace();
+  return isa<StackArgumentsSpaceRegion>(MS) ||
+         isa<GlobalsSpaceRegion>(MS);
+}
+
+// getBaseRegion strips away all element and field layers and returns the
+// underlying base region.
+const MemRegion *MemRegion::getBaseRegion() const {
+  const MemRegion *R = this;
+  while (true) {
+    switch (R->getKind()) {
+      case MemRegion::ElementRegionKind:
+      case MemRegion::FieldRegionKind:
+      case MemRegion::ObjCIvarRegionKind:
+      case MemRegion::CXXBaseObjectRegionKind:
+        R = cast<SubRegion>(R)->getSuperRegion();
+        continue;
+      default:
+        break;
+    }
+    break;
+  }
+  return R;
+}
+
+bool MemRegion::isSubRegionOf(const MemRegion *R) const {
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+// View handling.
+//===----------------------------------------------------------------------===//
+
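+// Strip away zero-index ElementRegions (array-to-pointer style views) and,
+// unless StripBaseCasts is false, CXXBaseObjectRegions (derived-to-base
+// views), returning the underlying region.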
+const MemRegion *MemRegion::StripCasts(bool StripBaseCasts) const {
+  const MemRegion *R = this;
+  while (true) {
+    switch (R->getKind()) {
+    case ElementRegionKind: {
+      const ElementRegion *ER = cast<ElementRegion>(R);
+      if (!ER->getIndex().isZeroConstant())
+        return R;
+      R = ER->getSuperRegion();
+      break;
+    }
+    case CXXBaseObjectRegionKind:
+      if (!StripBaseCasts)
+        return R;
+      R = cast<CXXBaseObjectRegion>(R)->getSuperRegion();
+      break;
+    default:
+      return R;
+    }
+  }
+}
+
+const SymbolicRegion *MemRegion::getSymbolicBase() const {
+  const SubRegion *SubR = dyn_cast<SubRegion>(this);
+
+  while (SubR) {
+    if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR))
+      return SymR;
+    SubR = dyn_cast<SubRegion>(SubR->getSuperRegion());
+  }
+  return 0;
+}
+
+// FIXME: Merge with the implementation of the same method in Store.cpp
+static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
+  if (const RecordType *RT = Ty->getAs<RecordType>()) {
+    const RecordDecl *D = RT->getDecl();
+    if (!D->getDefinition())
+      return false;
+  }
+
+  return true;
+}
+
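+// Fold a chain of ElementRegions with concrete indices into a single byte
+// offset from the nearest non-element super region; a symbolic index yields
+// a RegionRawOffset with a null region.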
+RegionRawOffset ElementRegion::getAsArrayOffset() const {
+  CharUnits offset = CharUnits::Zero();
+  const ElementRegion *ER = this;
+  const MemRegion *superR = NULL;
+  ASTContext &C = getContext();
+
+  // FIXME: Handle multi-dimensional arrays.
+
+  while (ER) {
+    superR = ER->getSuperRegion();
+
+    // FIXME: generalize to symbolic offsets.
+    SVal index = ER->getIndex();
+    if (Optional<nonloc::ConcreteInt> CI = index.getAs<nonloc::ConcreteInt>()) {
+      // Update the offset.
+      int64_t i = CI->getValue().getSExtValue();
+
+      if (i != 0) {
+        QualType elemType = ER->getElementType();
+
+        // If we are pointing to an incomplete type, go no further.
+        if (!IsCompleteType(C, elemType)) {
+          superR = ER;
+          break;
+        }
+
+        CharUnits size = C.getTypeSizeInChars(elemType);
+        offset += (i * size);
+      }
+
+      // Go to the next ElementRegion (if any).
+      ER = dyn_cast<ElementRegion>(superR);
+      continue;
+    }
+
+    return NULL;
+  }
+
+  assert(superR && "super region cannot be NULL");
+  return RegionRawOffset(superR, offset);
+}
+
+
+/// Returns true if \p Base is an immediate base class of \p Child
+static bool isImmediateBase(const CXXRecordDecl *Child,
+                            const CXXRecordDecl *Base) {
+  // Note that we do NOT canonicalize the base class here, because
+  // ASTRecordLayout doesn't either. If that leads us down the wrong path,
+  // so be it; at least we won't crash.
+  for (CXXRecordDecl::base_class_const_iterator I = Child->bases_begin(),
+                                                E = Child->bases_end();
+       I != E; ++I) {
+    if (I->getType()->getAsCXXRecordDecl() == Base)
+      return true;
+  }
+
+  return false;
+}
+
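+// Compute this region's offset from its base region, in bits, by walking up
+// the super-region chain; if any layer's offset is unknown or non-constant,
+// the result is a symbolic offset rooted at that layer.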
+RegionOffset MemRegion::getAsOffset() const {
+  const MemRegion *R = this;
+  const MemRegion *SymbolicOffsetBase = 0;
+  int64_t Offset = 0;
+
+  while (1) {
+    switch (R->getKind()) {
+    case GenericMemSpaceRegionKind:
+    case StackLocalsSpaceRegionKind:
+    case StackArgumentsSpaceRegionKind:
+    case HeapSpaceRegionKind:
+    case UnknownSpaceRegionKind:
+    case StaticGlobalSpaceRegionKind:
+    case GlobalInternalSpaceRegionKind:
+    case GlobalSystemSpaceRegionKind:
+    case GlobalImmutableSpaceRegionKind:
+      // Stores can bind directly to a region space to set a default value.
+      assert(Offset == 0 && !SymbolicOffsetBase);
+      goto Finish;
+
+    case FunctionTextRegionKind:
+    case BlockTextRegionKind:
+    case BlockDataRegionKind:
+      // These will never have bindings, but may end up having values requested
+      // if the user does some strange casting.
+      if (Offset != 0)
+        SymbolicOffsetBase = R;
+      goto Finish;
+
+    case SymbolicRegionKind:
+    case AllocaRegionKind:
+    case CompoundLiteralRegionKind:
+    case CXXThisRegionKind:
+    case StringRegionKind:
+    case ObjCStringRegionKind:
+    case VarRegionKind:
+    case CXXTempObjectRegionKind:
+      // Usual base regions.
+      goto Finish;
+
+    case ObjCIvarRegionKind:
+      // This is a little strange, but it's a compromise between
+      // ObjCIvarRegions having unknown compile-time offsets (when using the
+      // non-fragile runtime) and yet still being distinct, non-overlapping
+      // regions. Thus we treat them as "like" base regions for the purposes
+      // of computing offsets.
+      goto Finish;
+
+    case CXXBaseObjectRegionKind: {
+      const CXXBaseObjectRegion *BOR = cast<CXXBaseObjectRegion>(R);
+      R = BOR->getSuperRegion();
+
+      QualType Ty;
+      bool RootIsSymbolic = false;
+      if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(R)) {
+        Ty = TVR->getDesugaredValueType(getContext());
+      } else if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+        // If our base region is symbolic, we don't know what type it really is.
+        // Pretend the type of the symbol is the true dynamic type.
+        // (This will at least be self-consistent for the life of the symbol.)
+        Ty = SR->getSymbol()->getType()->getPointeeType();
+        RootIsSymbolic = true;
+      }
+      
+      const CXXRecordDecl *Child = Ty->getAsCXXRecordDecl();
+      if (!Child) {
+        // We cannot compute the offset of the base class.
+        SymbolicOffsetBase = R;
+      }
+
+      if (RootIsSymbolic) {
+        // Base layers on symbolic regions may not be type-correct.
+        // Double-check the inheritance here, and revert to a symbolic offset
+        // if it's invalid (e.g. due to a reinterpret_cast).
+        if (BOR->isVirtual()) {
+          if (!Child->isVirtuallyDerivedFrom(BOR->getDecl()))
+            SymbolicOffsetBase = R;
+        } else {
+          if (!isImmediateBase(Child, BOR->getDecl()))
+            SymbolicOffsetBase = R;
+        }
+      }
+
+      // Don't bother calculating precise offsets if we already have a
+      // symbolic offset somewhere in the chain.
+      if (SymbolicOffsetBase)
+        continue;
+
+      CharUnits BaseOffset;
+      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Child);
+      if (BOR->isVirtual())
+        BaseOffset = Layout.getVBaseClassOffset(BOR->getDecl());
+      else
+        BaseOffset = Layout.getBaseClassOffset(BOR->getDecl());
+
+      // The base offset is in chars, not in bits.
+      Offset += BaseOffset.getQuantity() * getContext().getCharWidth();
+      break;
+    }
+    case ElementRegionKind: {
+      const ElementRegion *ER = cast<ElementRegion>(R);
+      R = ER->getSuperRegion();
+
+      QualType EleTy = ER->getValueType();
+      if (!IsCompleteType(getContext(), EleTy)) {
+        // We cannot compute the offset of the base class.
+        SymbolicOffsetBase = R;
+        continue;
+      }
+
+      SVal Index = ER->getIndex();
+      if (Optional<nonloc::ConcreteInt> CI =
+              Index.getAs<nonloc::ConcreteInt>()) {
+        // Don't bother calculating precise offsets if we already have a
+        // symbolic offset somewhere in the chain. 
+        if (SymbolicOffsetBase)
+          continue;
+
+        int64_t i = CI->getValue().getSExtValue();
+        // This type size is in bits.
+        Offset += i * getContext().getTypeSize(EleTy);
+      } else {
+        // We cannot compute offset for non-concrete index.
+        SymbolicOffsetBase = R;
+      }
+      break;
+    }
+    case FieldRegionKind: {
+      const FieldRegion *FR = cast<FieldRegion>(R);
+      R = FR->getSuperRegion();
+
+      const RecordDecl *RD = FR->getDecl()->getParent();
+      if (RD->isUnion() || !RD->isCompleteDefinition()) {
+        // We cannot compute offset for incomplete type.
+        // For unions, we could treat everything as offset 0, but we'd rather
+        // treat each field as a symbolic offset so they aren't stored on top
+        // of each other, since we depend on things in typed regions actually
+        // matching their types.
+        SymbolicOffsetBase = R;
+      }
+
+      // Don't bother calculating precise offsets if we already have a
+      // symbolic offset somewhere in the chain.
+      if (SymbolicOffsetBase)
+        continue;
+
+      // Get the field number.
+      unsigned idx = 0;
+      for (RecordDecl::field_iterator FI = RD->field_begin(), 
+             FE = RD->field_end(); FI != FE; ++FI, ++idx)
+        if (FR->getDecl() == *FI)
+          break;
+
+      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+      // This is offset in bits.
+      Offset += Layout.getFieldOffset(idx);
+      break;
+    }
+    }
+  }
+
+ Finish:
+  if (SymbolicOffsetBase)
+    return RegionOffset(SymbolicOffsetBase, RegionOffset::Symbolic);
+  return RegionOffset(R, Offset);
+}
+
+//===----------------------------------------------------------------------===//
+// BlockDataRegion
+//===----------------------------------------------------------------------===//
+
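+// Return the region of the captured copy of 'VD' (a subregion of this
+// BlockDataRegion for by-value captures) paired with the region of the
+// original variable in the enclosing context.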
+std::pair<const VarRegion *, const VarRegion *>
+BlockDataRegion::getCaptureRegions(const VarDecl *VD) {
+  MemRegionManager &MemMgr = *getMemRegionManager();
+  const VarRegion *VR = 0;
+  const VarRegion *OriginalVR = 0;
+
+  if (!VD->getAttr<BlocksAttr>() && VD->hasLocalStorage()) {
+    VR = MemMgr.getVarRegion(VD, this);
+    OriginalVR = MemMgr.getVarRegion(VD, LC);
+  }
+  else {
+    if (LC) {
+      VR = MemMgr.getVarRegion(VD, LC);
+      OriginalVR = VR;
+    }
+    else {
+      VR = MemMgr.getVarRegion(VD, MemMgr.getUnknownRegion());
+      OriginalVR = MemMgr.getVarRegion(VD, LC);
+    }
+  }
+  return std::make_pair(VR, OriginalVR);
+}
+
+void BlockDataRegion::LazyInitializeReferencedVars() {
+  if (ReferencedVars)
+    return;
+
+  AnalysisDeclContext *AC = getCodeRegion()->getAnalysisDeclContext();
+  AnalysisDeclContext::referenced_decls_iterator I, E;
+  llvm::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl());
+
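+  // If the block references no variables, mark lazy initialization as done
+  // by storing the 0x1 sentinel, which the referenced_vars iterators check.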
+  if (I == E) {
+    ReferencedVars = (void*) 0x1;
+    return;
+  }
+
+  MemRegionManager &MemMgr = *getMemRegionManager();
+  llvm::BumpPtrAllocator &A = MemMgr.getAllocator();
+  BumpVectorContext BC(A);
+
+  typedef BumpVector<const MemRegion*> VarVec;
+  VarVec *BV = (VarVec*) A.Allocate<VarVec>();
+  new (BV) VarVec(BC, E - I);
+  VarVec *BVOriginal = (VarVec*) A.Allocate<VarVec>();
+  new (BVOriginal) VarVec(BC, E - I);
+
+  for ( ; I != E; ++I) {
+    const VarRegion *VR = 0;
+    const VarRegion *OriginalVR = 0;
+    llvm::tie(VR, OriginalVR) = getCaptureRegions(*I);
+    assert(VR);
+    assert(OriginalVR);
+    BV->push_back(VR, BC);
+    BVOriginal->push_back(OriginalVR, BC);
+  }
+
+  ReferencedVars = BV;
+  OriginalVars = BVOriginal;
+}
+
+BlockDataRegion::referenced_vars_iterator
+BlockDataRegion::referenced_vars_begin() const {
+  const_cast<BlockDataRegion*>(this)->LazyInitializeReferencedVars();
+
+  BumpVector<const MemRegion*> *Vec =
+    static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
+
+  if (Vec == (void*) 0x1)
+    return BlockDataRegion::referenced_vars_iterator(0, 0);
+  
+  BumpVector<const MemRegion*> *VecOriginal =
+    static_cast<BumpVector<const MemRegion*>*>(OriginalVars);
+  
+  return BlockDataRegion::referenced_vars_iterator(Vec->begin(),
+                                                   VecOriginal->begin());
+}
+
+BlockDataRegion::referenced_vars_iterator
+BlockDataRegion::referenced_vars_end() const {
+  const_cast<BlockDataRegion*>(this)->LazyInitializeReferencedVars();
+
+  BumpVector<const MemRegion*> *Vec =
+    static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
+
+  if (Vec == (void*) 0x1)
+    return BlockDataRegion::referenced_vars_iterator(0, 0);
+  
+  BumpVector<const MemRegion*> *VecOriginal =
+    static_cast<BumpVector<const MemRegion*>*>(OriginalVars);
+
+  return BlockDataRegion::referenced_vars_iterator(Vec->end(),
+                                                   VecOriginal->end());
+}
+
+const VarRegion *BlockDataRegion::getOriginalRegion(const VarRegion *R) const {
+  for (referenced_vars_iterator I = referenced_vars_begin(),
+                                E = referenced_vars_end();
+       I != E; ++I) {
+    if (I.getCapturedRegion() == R)
+      return I.getOriginalRegion();
+  }
+  return 0;
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
new file mode 100644
index 0000000..0351310
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -0,0 +1,1101 @@
+//===--- PathDiagnostic.cpp - Path-Specific Diagnostic Handling -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the PathDiagnostic-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+bool PathDiagnosticMacroPiece::containsEvent() const {
+  for (PathPieces::const_iterator I = subPieces.begin(), E = subPieces.end();
+       I!=E; ++I) {
+    if (isa<PathDiagnosticEventPiece>(*I))
+      return true;
+    if (PathDiagnosticMacroPiece *MP = dyn_cast<PathDiagnosticMacroPiece>(*I))
+      if (MP->containsEvent())
+        return true;
+  }
+  return false;
+}
+
+static StringRef StripTrailingDots(StringRef s) {
+  for (StringRef::size_type i = s.size(); i != 0; --i)
+    if (s[i - 1] != '.')
+      return s.substr(0, i);
+  return "";
+}
+
+PathDiagnosticPiece::PathDiagnosticPiece(StringRef s,
+                                         Kind k, DisplayHint hint)
+  : str(StripTrailingDots(s)), kind(k), Hint(hint) {}
+
+PathDiagnosticPiece::PathDiagnosticPiece(Kind k, DisplayHint hint)
+  : kind(k), Hint(hint) {}
+
+PathDiagnosticPiece::~PathDiagnosticPiece() {}
+PathDiagnosticEventPiece::~PathDiagnosticEventPiece() {}
+PathDiagnosticCallPiece::~PathDiagnosticCallPiece() {}
+PathDiagnosticControlFlowPiece::~PathDiagnosticControlFlowPiece() {}
+PathDiagnosticMacroPiece::~PathDiagnosticMacroPiece() {}
+
+
+PathPieces::~PathPieces() {}
+
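+// Flatten nested call pieces into 'Primary'. Macro sub-pieces are flattened
+// as well when ShouldFlattenMacros is set; otherwise macro pieces are kept
+// and their sub-pieces are recursively flattened in place.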
+void PathPieces::flattenTo(PathPieces &Primary, PathPieces &Current,
+                           bool ShouldFlattenMacros) const {
+  for (PathPieces::const_iterator I = begin(), E = end(); I != E; ++I) {
+    PathDiagnosticPiece *Piece = I->getPtr();
+
+    switch (Piece->getKind()) {
+    case PathDiagnosticPiece::Call: {
+      PathDiagnosticCallPiece *Call = cast<PathDiagnosticCallPiece>(Piece);
+      IntrusiveRefCntPtr<PathDiagnosticEventPiece> CallEnter =
+        Call->getCallEnterEvent();
+      if (CallEnter)
+        Current.push_back(CallEnter);
+      Call->path.flattenTo(Primary, Primary, ShouldFlattenMacros);
+      IntrusiveRefCntPtr<PathDiagnosticEventPiece> callExit =
+        Call->getCallExitEvent();
+      if (callExit)
+        Current.push_back(callExit);
+      break;
+    }
+    case PathDiagnosticPiece::Macro: {
+      PathDiagnosticMacroPiece *Macro = cast<PathDiagnosticMacroPiece>(Piece);
+      if (ShouldFlattenMacros) {
+        Macro->subPieces.flattenTo(Primary, Primary, ShouldFlattenMacros);
+      } else {
+        Current.push_back(Piece);
+        PathPieces NewPath;
+        Macro->subPieces.flattenTo(Primary, NewPath, ShouldFlattenMacros);
+        // FIXME: This probably shouldn't mutate the original path piece.
+        Macro->subPieces = NewPath;
+      }
+      break;
+    }
+    case PathDiagnosticPiece::Event:
+    case PathDiagnosticPiece::ControlFlow:
+      Current.push_back(Piece);
+      break;
+    }
+  }
+}
+
+
+PathDiagnostic::~PathDiagnostic() {}
+
+PathDiagnostic::PathDiagnostic(const Decl *declWithIssue,
+                               StringRef bugtype, StringRef verboseDesc,
+                               StringRef shortDesc, StringRef category,
+                               PathDiagnosticLocation LocationToUnique,
+                               const Decl *DeclToUnique)
+  : DeclWithIssue(declWithIssue),
+    BugType(StripTrailingDots(bugtype)),
+    VerboseDesc(StripTrailingDots(verboseDesc)),
+    ShortDesc(StripTrailingDots(shortDesc)),
+    Category(StripTrailingDots(category)),
+    UniqueingLoc(LocationToUnique),
+    UniqueingDecl(DeclToUnique),
+    path(pathImpl) {}
+
+void PathDiagnosticConsumer::anchor() { }
+
+PathDiagnosticConsumer::~PathDiagnosticConsumer() {
+  // Delete the contents of the FoldingSet if it isn't empty already.
+  for (llvm::FoldingSet<PathDiagnostic>::iterator it =
+       Diags.begin(), et = Diags.end() ; it != et ; ++it) {
+    delete &*it;
+  }
+}
+
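+// Take ownership of 'D' and queue it for emission, de-duplicating against a
+// previously seen diagnostic with an identical profile (the one with the
+// shorter path wins).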
+void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) {
+  OwningPtr<PathDiagnostic> OwningD(D);
+  
+  if (!D || D->path.empty())
+    return;
+  
+  // We need to flatten the locations (convert Stmt* to locations) because
+  // the referenced statements may be freed by the time the diagnostics
+  // are emitted.
+  D->flattenLocations();
+
+  // If the PathDiagnosticConsumer does not support diagnostics that
+  // cross file boundaries, prune out such diagnostics now.
+  if (!supportsCrossFileDiagnostics()) {
+    // Verify that the entire path is from the same FileID.
+    FileID FID;
+    const SourceManager &SMgr = (*D->path.begin())->getLocation().getManager();
+    SmallVector<const PathPieces *, 5> WorkList;
+    WorkList.push_back(&D->path);
+
+    while (!WorkList.empty()) {
+      const PathPieces &path = *WorkList.back();
+      WorkList.pop_back();
+
+      for (PathPieces::const_iterator I = path.begin(), E = path.end();
+           I != E; ++I) {
+        const PathDiagnosticPiece *piece = I->getPtr();
+        FullSourceLoc L = piece->getLocation().asLocation().getExpansionLoc();
+      
+        if (FID.isInvalid()) {
+          FID = SMgr.getFileID(L);
+        } else if (SMgr.getFileID(L) != FID)
+          return; // FIXME: Emit a warning?
+      
+        // Check the source ranges.
+        ArrayRef<SourceRange> Ranges = piece->getRanges();
+        for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
+                                             E = Ranges.end(); I != E; ++I) {
+          SourceLocation L = SMgr.getExpansionLoc(I->getBegin());
+          if (!L.isFileID() || SMgr.getFileID(L) != FID)
+            return; // FIXME: Emit a warning?
+          L = SMgr.getExpansionLoc(I->getEnd());
+          if (!L.isFileID() || SMgr.getFileID(L) != FID)
+            return; // FIXME: Emit a warning?
+        }
+        
+        if (const PathDiagnosticCallPiece *call =
+            dyn_cast<PathDiagnosticCallPiece>(piece)) {
+          WorkList.push_back(&call->path);
+        }
+        else if (const PathDiagnosticMacroPiece *macro =
+                 dyn_cast<PathDiagnosticMacroPiece>(piece)) {
+          WorkList.push_back(&macro->subPieces);
+        }
+      }
+    }
+    
+    if (FID.isInvalid())
+      return; // FIXME: Emit a warning?
+  }  
+
+  // Profile the node to see if we already have something matching it
+  llvm::FoldingSetNodeID profile;
+  D->Profile(profile);
+  void *InsertPos = 0;
+
+  if (PathDiagnostic *orig = Diags.FindNodeOrInsertPos(profile, InsertPos)) {
+    // Keep the PathDiagnostic with the shorter path.
+    // Note, the enclosing routine is called in deterministic order, so the
+    // results will be consistent between runs (no reason to break ties if the
+    // size is the same).
+    const unsigned orig_size = orig->full_size();
+    const unsigned new_size = D->full_size();
+    if (orig_size <= new_size)
+      return;
+
+    assert(orig != D);
+    Diags.RemoveNode(orig);
+    delete orig;
+  }
+  
+  Diags.InsertNode(OwningD.take());
+}
+
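+// Each compare* helper below returns None when its two operands are
+// equivalent under that helper's criteria, so callers fall through to the
+// next tie-breaker.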
+static Optional<bool> comparePath(const PathPieces &X, const PathPieces &Y);
+static Optional<bool>
+compareControlFlow(const PathDiagnosticControlFlowPiece &X,
+                   const PathDiagnosticControlFlowPiece &Y) {
+  FullSourceLoc XSL = X.getStartLocation().asLocation();
+  FullSourceLoc YSL = Y.getStartLocation().asLocation();
+  if (XSL != YSL)
+    return XSL.isBeforeInTranslationUnitThan(YSL);
+  FullSourceLoc XEL = X.getEndLocation().asLocation();
+  FullSourceLoc YEL = Y.getEndLocation().asLocation();
+  if (XEL != YEL)
+    return XEL.isBeforeInTranslationUnitThan(YEL);
+  return None;
+}
+
+static Optional<bool> compareMacro(const PathDiagnosticMacroPiece &X,
+                                   const PathDiagnosticMacroPiece &Y) {
+  return comparePath(X.subPieces, Y.subPieces);
+}
+
+static Optional<bool> compareCall(const PathDiagnosticCallPiece &X,
+                                  const PathDiagnosticCallPiece &Y) {
+  FullSourceLoc X_CEL = X.callEnter.asLocation();
+  FullSourceLoc Y_CEL = Y.callEnter.asLocation();
+  if (X_CEL != Y_CEL)
+    return X_CEL.isBeforeInTranslationUnitThan(Y_CEL);
+  FullSourceLoc X_CEWL = X.callEnterWithin.asLocation();
+  FullSourceLoc Y_CEWL = Y.callEnterWithin.asLocation();
+  if (X_CEWL != Y_CEWL)
+    return X_CEWL.isBeforeInTranslationUnitThan(Y_CEWL);
+  FullSourceLoc X_CRL = X.callReturn.asLocation();
+  FullSourceLoc Y_CRL = Y.callReturn.asLocation();
+  if (X_CRL != Y_CRL)
+    return X_CRL.isBeforeInTranslationUnitThan(Y_CRL);
+  return comparePath(X.path, Y.path);
+}
+
+static Optional<bool> comparePiece(const PathDiagnosticPiece &X,
+                                   const PathDiagnosticPiece &Y) {
+  if (X.getKind() != Y.getKind())
+    return X.getKind() < Y.getKind();
+  
+  FullSourceLoc XL = X.getLocation().asLocation();
+  FullSourceLoc YL = Y.getLocation().asLocation();
+  if (XL != YL)
+    return XL.isBeforeInTranslationUnitThan(YL);
+
+  if (X.getString() != Y.getString())
+    return X.getString() < Y.getString();
+
+  if (X.getRanges().size() != Y.getRanges().size())
+    return X.getRanges().size() < Y.getRanges().size();
+
+  const SourceManager &SM = XL.getManager();
+  
+  for (unsigned i = 0, n = X.getRanges().size(); i < n; ++i) {
+    SourceRange XR = X.getRanges()[i];
+    SourceRange YR = Y.getRanges()[i];
+    if (XR != YR) {
+      if (XR.getBegin() != YR.getBegin())
+        return SM.isBeforeInTranslationUnit(XR.getBegin(), YR.getBegin());
+      return SM.isBeforeInTranslationUnit(XR.getEnd(), YR.getEnd());
+    }
+  }
+  
+  switch (X.getKind()) {
+    case clang::ento::PathDiagnosticPiece::ControlFlow:
+      return compareControlFlow(cast<PathDiagnosticControlFlowPiece>(X),
+                                cast<PathDiagnosticControlFlowPiece>(Y));
+    case clang::ento::PathDiagnosticPiece::Event:
+      return None;
+    case clang::ento::PathDiagnosticPiece::Macro:
+      return compareMacro(cast<PathDiagnosticMacroPiece>(X),
+                          cast<PathDiagnosticMacroPiece>(Y));
+    case clang::ento::PathDiagnosticPiece::Call:
+      return compareCall(cast<PathDiagnosticCallPiece>(X),
+                         cast<PathDiagnosticCallPiece>(Y));
+  }
+  llvm_unreachable("all cases handled");
+}
+
+static Optional<bool> comparePath(const PathPieces &X, const PathPieces &Y) {
+  if (X.size() != Y.size())
+    return X.size() < Y.size();
+
+  PathPieces::const_iterator X_I = X.begin(), X_end = X.end();
+  PathPieces::const_iterator Y_I = Y.begin(), Y_end = Y.end();
+
+  for ( ; X_I != X_end && Y_I != Y_end; ++X_I, ++Y_I) {
+    Optional<bool> b = comparePiece(**X_I, **Y_I);
+    if (b.hasValue())
+      return b.getValue();
+  }
+
+  return None;
+}
+
+static bool compare(const PathDiagnostic &X, const PathDiagnostic &Y) {
+  FullSourceLoc XL = X.getLocation().asLocation();
+  FullSourceLoc YL = Y.getLocation().asLocation();
+  if (XL != YL)
+    return XL.isBeforeInTranslationUnitThan(YL);
+  if (X.getBugType() != Y.getBugType())
+    return X.getBugType() < Y.getBugType();
+  if (X.getCategory() != Y.getCategory())
+    return X.getCategory() < Y.getCategory();
+  if (X.getVerboseDescription() != Y.getVerboseDescription())
+    return X.getVerboseDescription() < Y.getVerboseDescription();
+  if (X.getShortDescription() != Y.getShortDescription())
+    return X.getShortDescription() < Y.getShortDescription();
+  if (X.getDeclWithIssue() != Y.getDeclWithIssue()) {
+    const Decl *XD = X.getDeclWithIssue();
+    if (!XD)
+      return true;
+    const Decl *YD = Y.getDeclWithIssue();
+    if (!YD)
+      return false;
+    SourceLocation XDL = XD->getLocation();
+    SourceLocation YDL = YD->getLocation();
+    if (XDL != YDL) {
+      const SourceManager &SM = XL.getManager();
+      return SM.isBeforeInTranslationUnit(XDL, YDL);
+    }
+  }
+  PathDiagnostic::meta_iterator XI = X.meta_begin(), XE = X.meta_end();
+  PathDiagnostic::meta_iterator YI = Y.meta_begin(), YE = Y.meta_end();
+  if (XE - XI != YE - YI)
+    return (XE - XI) < (YE - YI);
+  for ( ; XI != XE ; ++XI, ++YI) {
+    if (*XI != *YI)
+      return (*XI) < (*YI);
+  }
+  Optional<bool> b = comparePath(X.path, Y.path);
+  assert(b.hasValue());
+  return b.getValue();
+}
+
+namespace {
+struct CompareDiagnostics {
+  // Return true if 'X' should be ordered before ("<") 'Y'.
+  bool operator()(const PathDiagnostic *X, const PathDiagnostic *Y) const {
+    if (X == Y)
+      return false;
+    return compare(*X, *Y);
+  }
+};
+}
+
+void PathDiagnosticConsumer::FlushDiagnostics(
+                                     PathDiagnosticConsumer::FilesMade *Files) {
+  if (flushed)
+    return;
+  
+  flushed = true;
+  
+  std::vector<const PathDiagnostic *> BatchDiags;
+  for (llvm::FoldingSet<PathDiagnostic>::iterator it = Diags.begin(),
+       et = Diags.end(); it != et; ++it) {
+    const PathDiagnostic *D = &*it;
+    BatchDiags.push_back(D);
+  }
+
+  // Sort the diagnostics so that they are always emitted in a deterministic
+  // order.
+  if (!BatchDiags.empty())
+    std::sort(BatchDiags.begin(), BatchDiags.end(), CompareDiagnostics());
+  
+  FlushDiagnosticsImpl(BatchDiags, Files);
+
+  // Delete the flushed diagnostics.
+  for (std::vector<const PathDiagnostic *>::iterator it = BatchDiags.begin(),
+       et = BatchDiags.end(); it != et; ++it) {
+    const PathDiagnostic *D = *it;
+    delete D;
+  }
+  
+  // Clear out the FoldingSet.
+  Diags.clear();
+}
+
+void PathDiagnosticConsumer::FilesMade::addDiagnostic(const PathDiagnostic &PD,
+                                                      StringRef ConsumerName,
+                                                      StringRef FileName) {
+  llvm::FoldingSetNodeID NodeID;
+  NodeID.Add(PD);
+  void *InsertPos;
+  PDFileEntry *Entry = FindNodeOrInsertPos(NodeID, InsertPos);
+  if (!Entry) {
+    Entry = Alloc.Allocate<PDFileEntry>();
+    Entry = new (Entry) PDFileEntry(NodeID);
+    InsertNode(Entry, InsertPos);
+  }
+  
+  // Allocate persistent storage for the file name.
+  char *FileName_cstr = (char*) Alloc.Allocate(FileName.size(), 1);
+  memcpy(FileName_cstr, FileName.data(), FileName.size());
+
+  Entry->files.push_back(std::make_pair(ConsumerName,
+                                        StringRef(FileName_cstr,
+                                                  FileName.size())));
+}
+
+PathDiagnosticConsumer::PDFileEntry::ConsumerFiles *
+PathDiagnosticConsumer::FilesMade::getFiles(const PathDiagnostic &PD) {
+  llvm::FoldingSetNodeID NodeID;
+  NodeID.Add(PD);
+  void *InsertPos;
+  PDFileEntry *Entry = FindNodeOrInsertPos(NodeID, InsertPos);
+  if (!Entry)
+    return 0;
+  return &Entry->files;
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnosticLocation methods.
+//===----------------------------------------------------------------------===//
+
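+// Return a usable SourceLocation for 'S', falling back to the location of an
+// enclosing statement (or the declaration's body) when 'S' itself has no
+// valid location in the source.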
+static SourceLocation getValidSourceLocation(const Stmt* S,
+                                             LocationOrAnalysisDeclContext LAC,
+                                             bool UseEnd = false) {
+  SourceLocation L = UseEnd ? S->getLocEnd() : S->getLocStart();
+  assert(!LAC.isNull() && "A valid LocationContext or AnalysisDeclContext should "
+                          "be passed to PathDiagnosticLocation upon creation.");
+
+  // S might be a temporary statement that does not have a location in the
+  // source code, so find an enclosing statement and use its location.
+  if (!L.isValid()) {
+
+    AnalysisDeclContext *ADC;
+    if (LAC.is<const LocationContext*>())
+      ADC = LAC.get<const LocationContext*>()->getAnalysisDeclContext();
+    else
+      ADC = LAC.get<AnalysisDeclContext*>();
+
+    ParentMap &PM = ADC->getParentMap();
+
+    const Stmt *Parent = S;
+    do {
+      Parent = PM.getParent(Parent);
+
+      // In rare cases, we have implicit top-level expressions,
+      // such as arguments for implicit member initializers.
+      // In this case, fall back to the start of the body (even if we were
+      // asked for the statement end location).
+      if (!Parent) {
+        const Stmt *Body = ADC->getBody();
+        if (Body)
+          L = Body->getLocStart();
+        else
+          L = ADC->getDecl()->getLocEnd();
+        break;
+      }
+
+      L = UseEnd ? Parent->getLocEnd() : Parent->getLocStart();
+    } while (!L.isValid());
+  }
+
+  return L;
+}
+
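+// Compute the location in the caller at which the given callee stack frame
+// was entered, based on the CFG element that triggered the call.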
+static PathDiagnosticLocation
+getLocationForCaller(const StackFrameContext *SFC,
+                     const LocationContext *CallerCtx,
+                     const SourceManager &SM) {
+  const CFGBlock &Block = *SFC->getCallSiteBlock();
+  CFGElement Source = Block[SFC->getIndex()];
+
+  switch (Source.getKind()) {
+  case CFGElement::Statement:
+    return PathDiagnosticLocation(Source.castAs<CFGStmt>().getStmt(),
+                                  SM, CallerCtx);
+  case CFGElement::Initializer: {
+    const CFGInitializer &Init = Source.castAs<CFGInitializer>();
+    return PathDiagnosticLocation(Init.getInitializer()->getInit(),
+                                  SM, CallerCtx);
+  }
+  case CFGElement::AutomaticObjectDtor: {
+    const CFGAutomaticObjDtor &Dtor = Source.castAs<CFGAutomaticObjDtor>();
+    return PathDiagnosticLocation::createEnd(Dtor.getTriggerStmt(),
+                                             SM, CallerCtx);
+  }
+  case CFGElement::BaseDtor:
+  case CFGElement::MemberDtor: {
+    const AnalysisDeclContext *CallerInfo = CallerCtx->getAnalysisDeclContext();
+    if (const Stmt *CallerBody = CallerInfo->getBody())
+      return PathDiagnosticLocation::createEnd(CallerBody, SM, CallerCtx);
+    return PathDiagnosticLocation::create(CallerInfo->getDecl(), SM);
+  }
+  case CFGElement::TemporaryDtor:
+    llvm_unreachable("not yet implemented!");
+  }
+
+  llvm_unreachable("Unknown CFGElement kind");
+}
+
+
+PathDiagnosticLocation
+  PathDiagnosticLocation::createBegin(const Decl *D,
+                                      const SourceManager &SM) {
+  return PathDiagnosticLocation(D->getLocStart(), SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+  PathDiagnosticLocation::createBegin(const Stmt *S,
+                                      const SourceManager &SM,
+                                      LocationOrAnalysisDeclContext LAC) {
+  return PathDiagnosticLocation(getValidSourceLocation(S, LAC),
+                                SM, SingleLocK);
+}
+
+
+PathDiagnosticLocation
+PathDiagnosticLocation::createEnd(const Stmt *S,
+                                  const SourceManager &SM,
+                                  LocationOrAnalysisDeclContext LAC) {
+  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S))
+    return createEndBrace(CS, SM);
+  return PathDiagnosticLocation(getValidSourceLocation(S, LAC, /*End=*/true),
+                                SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+  PathDiagnosticLocation::createOperatorLoc(const BinaryOperator *BO,
+                                            const SourceManager &SM) {
+  return PathDiagnosticLocation(BO->getOperatorLoc(), SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+  PathDiagnosticLocation::createMemberLoc(const MemberExpr *ME,
+                                          const SourceManager &SM) {
+  return PathDiagnosticLocation(ME->getMemberLoc(), SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+  PathDiagnosticLocation::createBeginBrace(const CompoundStmt *CS,
+                                           const SourceManager &SM) {
+  SourceLocation L = CS->getLBracLoc();
+  return PathDiagnosticLocation(L, SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+  PathDiagnosticLocation::createEndBrace(const CompoundStmt *CS,
+                                         const SourceManager &SM) {
+  SourceLocation L = CS->getRBracLoc();
+  return PathDiagnosticLocation(L, SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+  PathDiagnosticLocation::createDeclBegin(const LocationContext *LC,
+                                          const SourceManager &SM) {
+  // FIXME: Should handle CXXTryStmt if analyser starts supporting C++.
+  if (const CompoundStmt *CS =
+        dyn_cast_or_null<CompoundStmt>(LC->getDecl()->getBody()))
+    if (!CS->body_empty()) {
+      SourceLocation Loc = (*CS->body_begin())->getLocStart();
+      return PathDiagnosticLocation(Loc, SM, SingleLocK);
+    }
+
+  return PathDiagnosticLocation();
+}
+
+PathDiagnosticLocation
+  PathDiagnosticLocation::createDeclEnd(const LocationContext *LC,
+                                        const SourceManager &SM) {
+  SourceLocation L = LC->getDecl()->getBodyRBrace();
+  return PathDiagnosticLocation(L, SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+  PathDiagnosticLocation::create(const ProgramPoint& P,
+                                 const SourceManager &SMng) {
+
+  const Stmt* S = 0;
+  if (Optional<BlockEdge> BE = P.getAs<BlockEdge>()) {
+    const CFGBlock *BSrc = BE->getSrc();
+    S = BSrc->getTerminatorCondition();
+  } else if (Optional<StmtPoint> SP = P.getAs<StmtPoint>()) {
+    S = SP->getStmt();
+    if (P.getAs<PostStmtPurgeDeadSymbols>())
+      return PathDiagnosticLocation::createEnd(S, SMng, P.getLocationContext());
+  } else if (Optional<PostInitializer> PIP = P.getAs<PostInitializer>()) {
+    return PathDiagnosticLocation(PIP->getInitializer()->getSourceLocation(),
+                                  SMng);
+  } else if (Optional<PostImplicitCall> PIE = P.getAs<PostImplicitCall>()) {
+    return PathDiagnosticLocation(PIE->getLocation(), SMng);
+  } else if (Optional<CallEnter> CE = P.getAs<CallEnter>()) {
+    return getLocationForCaller(CE->getCalleeContext(),
+                                CE->getLocationContext(),
+                                SMng);
+  } else if (Optional<CallExitEnd> CEE = P.getAs<CallExitEnd>()) {
+    return getLocationForCaller(CEE->getCalleeContext(),
+                                CEE->getLocationContext(),
+                                SMng);
+  } else {
+    llvm_unreachable("Unexpected ProgramPoint");
+  }
+
+  return PathDiagnosticLocation(S, SMng, P.getLocationContext());
+}
+
+const Stmt *PathDiagnosticLocation::getStmt(const ExplodedNode *N) {
+  ProgramPoint P = N->getLocation();
+  if (Optional<StmtPoint> SP = P.getAs<StmtPoint>())
+    return SP->getStmt();
+  if (Optional<BlockEdge> BE = P.getAs<BlockEdge>())
+    return BE->getSrc()->getTerminator();
+  if (Optional<CallEnter> CE = P.getAs<CallEnter>())
+    return CE->getCallExpr();
+  if (Optional<CallExitEnd> CEE = P.getAs<CallExitEnd>())
+    return CEE->getCalleeContext()->getCallSite();
+  if (Optional<PostInitializer> PIPP = P.getAs<PostInitializer>())
+    return PIPP->getInitializer()->getInit();
+
+  return 0;
+}
+
+const Stmt *PathDiagnosticLocation::getNextStmt(const ExplodedNode *N) {
+  for (N = N->getFirstSucc(); N; N = N->getFirstSucc()) {
+    if (const Stmt *S = getStmt(N)) {
+      // Check if the statement is '?' or '&&'/'||'.  These are "merges",
+      // not actual statement points.
+      switch (S->getStmtClass()) {
+        case Stmt::ChooseExprClass:
+        case Stmt::BinaryConditionalOperatorClass:
+        case Stmt::ConditionalOperatorClass:
+          continue;
+        case Stmt::BinaryOperatorClass: {
+          BinaryOperatorKind Op = cast<BinaryOperator>(S)->getOpcode();
+          if (Op == BO_LAnd || Op == BO_LOr)
+            continue;
+          break;
+        }
+        default:
+          break;
+      }
+      // We found the statement, so return it.
+      return S;
+    }
+  }
+
+  return 0;
+}
+
+PathDiagnosticLocation
+  PathDiagnosticLocation::createEndOfPath(const ExplodedNode *N,
+                                          const SourceManager &SM) {
+  assert(N && "Cannot create a location with a null node.");
+  const Stmt *S = getStmt(N);
+
+  if (!S)
+    S = getNextStmt(N);
+
+  if (S) {
+    ProgramPoint P = N->getLocation();
+    const LocationContext *LC = N->getLocationContext();
+
+    // For member expressions, return the location of the '.' or '->'.
+    if (const MemberExpr *ME = dyn_cast<MemberExpr>(S))
+      return PathDiagnosticLocation::createMemberLoc(ME, SM);
+
+    // For binary operators, return the location of the operator.
+    if (const BinaryOperator *B = dyn_cast<BinaryOperator>(S))
+      return PathDiagnosticLocation::createOperatorLoc(B, SM);
+
+    if (P.getAs<PostStmtPurgeDeadSymbols>())
+      return PathDiagnosticLocation::createEnd(S, SM, LC);
+
+    if (S->getLocStart().isValid())
+      return PathDiagnosticLocation(S, SM, LC);
+    return PathDiagnosticLocation(getValidSourceLocation(S, LC), SM);
+  }
+
+  return createDeclEnd(N->getLocationContext(), SM);
+}
+
+PathDiagnosticLocation PathDiagnosticLocation::createSingleLocation(
+                                           const PathDiagnosticLocation &PDL) {
+  FullSourceLoc L = PDL.asLocation();
+  return PathDiagnosticLocation(L, L.getManager(), SingleLocK);
+}
+
+FullSourceLoc
+  PathDiagnosticLocation::genLocation(SourceLocation L,
+                                      LocationOrAnalysisDeclContext LAC) const {
+  assert(isValid());
+  // Note that we want a 'switch' here so that the compiler can warn us in
+  // case we add more cases.
+  switch (K) {
+    case SingleLocK:
+    case RangeK:
+      break;
+    case StmtK:
+      // Defensive checking.
+      if (!S)
+        break;
+      return FullSourceLoc(getValidSourceLocation(S, LAC),
+                           const_cast<SourceManager&>(*SM));
+    case DeclK:
+      // Defensive checking.
+      if (!D)
+        break;
+      return FullSourceLoc(D->getLocation(), const_cast<SourceManager&>(*SM));
+  }
+
+  return FullSourceLoc(L, const_cast<SourceManager&>(*SM));
+}
+
+PathDiagnosticRange
+  PathDiagnosticLocation::genRange(LocationOrAnalysisDeclContext LAC) const {
+  assert(isValid());
+  // Note that we want a 'switch' here so that the compiler can warn us in
+  // case we add more cases.
+  switch (K) {
+    case SingleLocK:
+      return PathDiagnosticRange(SourceRange(Loc,Loc), true);
+    case RangeK:
+      break;
+    case StmtK: {
+      const Stmt *S = asStmt();
+      switch (S->getStmtClass()) {
+        default:
+          break;
+        case Stmt::DeclStmtClass: {
+          const DeclStmt *DS = cast<DeclStmt>(S);
+          if (DS->isSingleDecl()) {
+            // Should always be the case, but we'll be defensive.
+            return SourceRange(DS->getLocStart(),
+                               DS->getSingleDecl()->getLocation());
+          }
+          break;
+        }
+          // FIXME: Provide better range information for different
+          //  terminators.
+        case Stmt::IfStmtClass:
+        case Stmt::WhileStmtClass:
+        case Stmt::DoStmtClass:
+        case Stmt::ForStmtClass:
+        case Stmt::ChooseExprClass:
+        case Stmt::IndirectGotoStmtClass:
+        case Stmt::SwitchStmtClass:
+        case Stmt::BinaryConditionalOperatorClass:
+        case Stmt::ConditionalOperatorClass:
+        case Stmt::ObjCForCollectionStmtClass: {
+          SourceLocation L = getValidSourceLocation(S, LAC);
+          return SourceRange(L, L);
+        }
+      }
+      SourceRange R = S->getSourceRange();
+      if (R.isValid())
+        return R;
+      break;  
+    }
+    case DeclK:
+      if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+        return MD->getSourceRange();
+      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+        if (Stmt *Body = FD->getBody())
+          return Body->getSourceRange();
+      }
+      else {
+        SourceLocation L = D->getLocation();
+        return PathDiagnosticRange(SourceRange(L, L), true);
+      }
+  }
+
+  return SourceRange(Loc,Loc);
+}
+
+void PathDiagnosticLocation::flatten() {
+  if (K == StmtK) {
+    K = RangeK;
+    S = 0;
+    D = 0;
+  }
+  else if (K == DeclK) {
+    K = SingleLocK;
+    S = 0;
+    D = 0;
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Manipulation of PathDiagnosticCallPieces.
+//===----------------------------------------------------------------------===//
+
+PathDiagnosticCallPiece *
+PathDiagnosticCallPiece::construct(const ExplodedNode *N,
+                                   const CallExitEnd &CE,
+                                   const SourceManager &SM) {
+  const Decl *caller = CE.getLocationContext()->getDecl();
+  PathDiagnosticLocation pos = getLocationForCaller(CE.getCalleeContext(),
+                                                    CE.getLocationContext(),
+                                                    SM);
+  return new PathDiagnosticCallPiece(caller, pos);
+}
+
+PathDiagnosticCallPiece *
+PathDiagnosticCallPiece::construct(PathPieces &path,
+                                   const Decl *caller) {
+  PathDiagnosticCallPiece *C = new PathDiagnosticCallPiece(path, caller);
+  path.clear();
+  path.push_front(C);
+  return C;
+}
+
+void PathDiagnosticCallPiece::setCallee(const CallEnter &CE,
+                                        const SourceManager &SM) {
+  const StackFrameContext *CalleeCtx = CE.getCalleeContext();
+  Callee = CalleeCtx->getDecl();
+
+  callEnterWithin = PathDiagnosticLocation::createBegin(Callee, SM);
+  callEnter = getLocationForCaller(CalleeCtx, CE.getLocationContext(), SM);
+}
+
+static inline void describeClass(raw_ostream &Out, const CXXRecordDecl *D,
+                                 StringRef Prefix = StringRef()) {
+  if (!D->getIdentifier())
+    return;
+  Out << Prefix << '\'' << *D << '\'';
+}
+
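+// Prints a human-readable description of the given declaration to 'Out',
+// e.g. "copy constructor for 'Foo'" or "'Bar::baz'".  Returns true if a
+// description was emitted.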
+static bool describeCodeDecl(raw_ostream &Out, const Decl *D,
+                             bool ExtendedDescription,
+                             StringRef Prefix = StringRef()) {
+  if (!D)
+    return false;
+
+  if (isa<BlockDecl>(D)) {
+    if (ExtendedDescription)
+      Out << Prefix << "anonymous block";
+    return ExtendedDescription;
+  }
+
+  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+    Out << Prefix;
+    if (ExtendedDescription && !MD->isUserProvided()) {
+      if (MD->isExplicitlyDefaulted())
+        Out << "defaulted ";
+      else
+        Out << "implicit ";
+    }
+
+    if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(MD)) {
+      if (CD->isDefaultConstructor())
+        Out << "default ";
+      else if (CD->isCopyConstructor())
+        Out << "copy ";
+      else if (CD->isMoveConstructor())
+        Out << "move ";
+
+      Out << "constructor";
+      describeClass(Out, MD->getParent(), " for ");
+      
+    } else if (isa<CXXDestructorDecl>(MD)) {
+      if (!MD->isUserProvided()) {
+        Out << "destructor";
+        describeClass(Out, MD->getParent(), " for ");
+      } else {
+        // Use ~Foo for explicitly-written destructors.
+        Out << "'" << *MD << "'";
+      }
+
+    } else if (MD->isCopyAssignmentOperator()) {
+        Out << "copy assignment operator";
+        describeClass(Out, MD->getParent(), " for ");
+
+    } else if (MD->isMoveAssignmentOperator()) {
+        Out << "move assignment operator";
+        describeClass(Out, MD->getParent(), " for ");
+
+    } else {
+      if (MD->getParent()->getIdentifier())
+        Out << "'" << *MD->getParent() << "::" << *MD << "'";
+      else
+        Out << "'" << *MD << "'";
+    }
+
+    return true;
+  }
+
+  Out << Prefix << '\'' << cast<NamedDecl>(*D) << '\'';
+  return true;
+}
+
+IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+PathDiagnosticCallPiece::getCallEnterEvent() const {
+  if (!Callee)
+    return 0;  
+
+  SmallString<256> buf;
+  llvm::raw_svector_ostream Out(buf);
+
+  Out << "Calling ";
+  describeCodeDecl(Out, Callee, /*ExtendedDescription=*/true);
+
+  assert(callEnter.asLocation().isValid());
+  return new PathDiagnosticEventPiece(callEnter, Out.str());
+}
+
+IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+PathDiagnosticCallPiece::getCallEnterWithinCallerEvent() const {
+  if (!callEnterWithin.asLocation().isValid())
+    return 0;
+  if (Callee->isImplicit())
+    return 0;
+  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Callee))
+    if (MD->isDefaulted())
+      return 0;
+
+  SmallString<256> buf;
+  llvm::raw_svector_ostream Out(buf);
+
+  Out << "Entered call";
+  describeCodeDecl(Out, Caller, /*ExtendedDescription=*/false, " from ");
+
+  return new PathDiagnosticEventPiece(callEnterWithin, Out.str());
+}
+
+IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+PathDiagnosticCallPiece::getCallExitEvent() const {
+  if (NoExit)
+    return 0;
+
+  SmallString<256> buf;
+  llvm::raw_svector_ostream Out(buf);
+
+  if (!CallStackMessage.empty()) {
+    Out << CallStackMessage;
+  } else {
+    bool DidDescribe = describeCodeDecl(Out, Callee,
+                                        /*ExtendedDescription=*/false,
+                                        "Returning from ");
+    if (!DidDescribe)
+      Out << "Returning to caller";
+  }
+
+  assert(callReturn.asLocation().isValid());
+  return new PathDiagnosticEventPiece(callReturn, Out.str());
+}
+
+static void compute_path_size(const PathPieces &pieces, unsigned &size) {
+  for (PathPieces::const_iterator it = pieces.begin(),
+                                  et = pieces.end(); it != et; ++it) {
+    const PathDiagnosticPiece *piece = it->getPtr();
+    if (const PathDiagnosticCallPiece *cp = 
+        dyn_cast<PathDiagnosticCallPiece>(piece)) {
+      compute_path_size(cp->path, size);
+    }
+    else
+      ++size;
+  }
+}
+
+unsigned PathDiagnostic::full_size() {
+  unsigned size = 0;
+  compute_path_size(path, size);
+  return size;
+}
+
+//===----------------------------------------------------------------------===//
+// FoldingSet profiling methods.
+//===----------------------------------------------------------------------===//
+
+void PathDiagnosticLocation::Profile(llvm::FoldingSetNodeID &ID) const {
+  ID.AddInteger(Range.getBegin().getRawEncoding());
+  ID.AddInteger(Range.getEnd().getRawEncoding());
+  ID.AddInteger(Loc.getRawEncoding());
+  return;
+}
+
+void PathDiagnosticPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+  ID.AddInteger((unsigned) getKind());
+  ID.AddString(str);
+  // FIXME: Add profiling support for code hints.
+  ID.AddInteger((unsigned) getDisplayHint());
+  ArrayRef<SourceRange> Ranges = getRanges();
+  for (ArrayRef<SourceRange>::iterator I = Ranges.begin(), E = Ranges.end();
+                                        I != E; ++I) {
+    ID.AddInteger(I->getBegin().getRawEncoding());
+    ID.AddInteger(I->getEnd().getRawEncoding());
+  }  
+}
+
+void PathDiagnosticCallPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+  PathDiagnosticPiece::Profile(ID);
+  for (PathPieces::const_iterator it = path.begin(), 
+       et = path.end(); it != et; ++it) {
+    ID.Add(**it);
+  }
+}
+
+void PathDiagnosticSpotPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+  PathDiagnosticPiece::Profile(ID);
+  ID.Add(Pos);
+}
+
+void PathDiagnosticControlFlowPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+  PathDiagnosticPiece::Profile(ID);
+  for (const_iterator I = begin(), E = end(); I != E; ++I)
+    ID.Add(*I);
+}
+
+void PathDiagnosticMacroPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+  PathDiagnosticSpotPiece::Profile(ID);
+  for (PathPieces::const_iterator I = subPieces.begin(), E = subPieces.end();
+       I != E; ++I)
+    ID.Add(**I);
+}
+
+void PathDiagnostic::Profile(llvm::FoldingSetNodeID &ID) const {
+  ID.Add(getLocation());
+  ID.AddString(BugType);
+  ID.AddString(VerboseDesc);
+  ID.AddString(Category);
+}
+
+void PathDiagnostic::FullProfile(llvm::FoldingSetNodeID &ID) const {
+  Profile(ID);
+  for (PathPieces::const_iterator I = path.begin(), E = path.end(); I != E; ++I)
+    ID.Add(**I);
+  for (meta_iterator I = meta_begin(), E = meta_end(); I != E; ++I)
+    ID.AddString(*I);
+}
+
+StackHintGenerator::~StackHintGenerator() {}
+
+std::string StackHintGeneratorForSymbol::getMessage(const ExplodedNode *N) {
+  if (!N)
+    return getMessageForSymbolNotFound();
+
+  ProgramPoint P = N->getLocation();
+  CallExitEnd CExit = P.castAs<CallExitEnd>();
+
+  // FIXME: Use CallEvent to abstract this over all calls.
+  const Stmt *CallSite = CExit.getCalleeContext()->getCallSite();
+  const CallExpr *CE = dyn_cast_or_null<CallExpr>(CallSite);
+  if (!CE)
+    return "";
+
+  // Check if one of the parameters is set to the interesting symbol.
+  ProgramStateRef State = N->getState();
+  const LocationContext *LCtx = N->getLocationContext();
+  unsigned ArgIndex = 0;
+  for (CallExpr::const_arg_iterator I = CE->arg_begin(),
+                                    E = CE->arg_end(); I != E; ++I, ++ArgIndex){
+    SVal SV = State->getSVal(*I, LCtx);
+
+    // Check if the variable corresponding to the symbol is passed by value.
+    SymbolRef AS = SV.getAsLocSymbol();
+    if (AS == Sym) {
+      return getMessageForArg(*I, ArgIndex);
+    }
+
+    // Check if the parameter is a pointer to the symbol.
+    if (Optional<loc::MemRegionVal> Reg = SV.getAs<loc::MemRegionVal>()) {
+      SVal PSV = State->getSVal(Reg->getRegion());
+      SymbolRef AS = PSV.getAsLocSymbol();
+      if (AS == Sym) {
+        return getMessageForArg(*I, ArgIndex);
+      }
+    }
+  }
+
+  // Check if we are returning the interesting symbol.
+  SVal SV = State->getSVal(CE, LCtx);
+  SymbolRef RetSym = SV.getAsLocSymbol();
+  if (RetSym == Sym) {
+    return getMessageForReturn(CE);
+  }
+
+  return getMessageForSymbolNotFound();
+}
+
+std::string StackHintGeneratorForSymbol::getMessageForArg(const Expr *ArgE,
+                                                          unsigned ArgIndex) {
+  // Printed parameters start at 1, not 0.
+  ++ArgIndex;
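+  // e.g. the argument at index 0 is described as the "1st parameter".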
+
+  SmallString<200> buf;
+  llvm::raw_svector_ostream os(buf);
+
+  os << Msg << " via " << ArgIndex << llvm::getOrdinalSuffix(ArgIndex)
+     << " parameter";
+
+  return os.str();
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
new file mode 100644
index 0000000..8509555
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -0,0 +1,564 @@
+//===--- PlistDiagnostics.cpp - Plist Diagnostics for Paths -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the PlistDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Version.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace ento;
+
+typedef llvm::DenseMap<FileID, unsigned> FIDMap;
+
+
+namespace {
+  class PlistDiagnostics : public PathDiagnosticConsumer {
+    const std::string OutputFile;
+    const LangOptions &LangOpts;
+    const bool SupportsCrossFileDiagnostics;
+  public:
+    PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
+                     const std::string& prefix,
+                     const LangOptions &LangOpts,
+                     bool supportsMultipleFiles);
+
+    virtual ~PlistDiagnostics() {}
+
+    void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+                              FilesMade *filesMade);
+    
+    virtual StringRef getName() const {
+      return "PlistDiagnostics";
+    }
+
+    PathGenerationScheme getGenerationScheme() const { return Extensive; }
+    bool supportsLogicalOpControlFlow() const { return true; }
+    bool supportsAllBlockEdges() const { return true; }
+    virtual bool supportsCrossFileDiagnostics() const {
+      return SupportsCrossFileDiagnostics;
+    }
+  };
+} // end anonymous namespace
+
+PlistDiagnostics::PlistDiagnostics(AnalyzerOptions &AnalyzerOpts,
+                                   const std::string& output,
+                                   const LangOptions &LO,
+                                   bool supportsMultipleFiles)
+  : OutputFile(output),
+    LangOpts(LO),
+    SupportsCrossFileDiagnostics(supportsMultipleFiles) {}
+
+void ento::createPlistDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+                                         PathDiagnosticConsumers &C,
+                                         const std::string& s,
+                                         const Preprocessor &PP) {
+  C.push_back(new PlistDiagnostics(AnalyzerOpts, s,
+                                   PP.getLangOpts(), false));
+}
+
+void ento::createPlistMultiFileDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+                                                  PathDiagnosticConsumers &C,
+                                                  const std::string &s,
+                                                  const Preprocessor &PP) {
+  C.push_back(new PlistDiagnostics(AnalyzerOpts, s,
+                                   PP.getLangOpts(), true));
+}
+
+static void AddFID(FIDMap &FIDs, SmallVectorImpl<FileID> &V,
+                   const SourceManager* SM, SourceLocation L) {
+
+  FileID FID = SM->getFileID(SM->getExpansionLoc(L));
+  FIDMap::iterator I = FIDs.find(FID);
+  if (I != FIDs.end()) return;
+  FIDs[FID] = V.size();
+  V.push_back(FID);
+}
+
+static unsigned GetFID(const FIDMap& FIDs, const SourceManager &SM,
+                       SourceLocation L) {
+  FileID FID = SM.getFileID(SM.getExpansionLoc(L));
+  FIDMap::const_iterator I = FIDs.find(FID);
+  assert(I != FIDs.end());
+  return I->second;
+}
+
+static raw_ostream &Indent(raw_ostream &o, const unsigned indent) {
+  for (unsigned i = 0; i < indent; ++i) o << ' ';
+  return o;
+}
+
+static void EmitLocation(raw_ostream &o, const SourceManager &SM,
+                         const LangOptions &LangOpts,
+                         SourceLocation L, const FIDMap &FM,
+                         unsigned indent, bool extend = false) {
+
+  FullSourceLoc Loc(SM.getExpansionLoc(L), const_cast<SourceManager&>(SM));
+
+  // Add in the length of the token, so that we cover multi-char tokens.
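+  // e.g. if 'extend' is set and the token is "foo" starting at column 10,
+  // the reported column becomes 12 (the token's last character).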
+  unsigned offset =
+    extend ? Lexer::MeasureTokenLength(Loc, SM, LangOpts) - 1 : 0;
+
+  Indent(o, indent) << "<dict>\n";
+  Indent(o, indent) << " <key>line</key><integer>"
+                    << Loc.getExpansionLineNumber() << "</integer>\n";
+  Indent(o, indent) << " <key>col</key><integer>"
+                    << Loc.getExpansionColumnNumber() + offset << "</integer>\n";
+  Indent(o, indent) << " <key>file</key><integer>"
+                    << GetFID(FM, SM, Loc) << "</integer>\n";
+  Indent(o, indent) << "</dict>\n";
+}
+
+static void EmitLocation(raw_ostream &o, const SourceManager &SM,
+                         const LangOptions &LangOpts,
+                         const PathDiagnosticLocation &L, const FIDMap& FM,
+                         unsigned indent, bool extend = false) {
+  EmitLocation(o, SM, LangOpts, L.asLocation(), FM, indent, extend);
+}
+
+static void EmitRange(raw_ostream &o, const SourceManager &SM,
+                      const LangOptions &LangOpts,
+                      PathDiagnosticRange R, const FIDMap &FM,
+                      unsigned indent) {
+  Indent(o, indent) << "<array>\n";
+  EmitLocation(o, SM, LangOpts, R.getBegin(), FM, indent+1);
+  EmitLocation(o, SM, LangOpts, R.getEnd(), FM, indent+1, !R.isPoint);
+  Indent(o, indent) << "</array>\n";
+}
+
+static raw_ostream &EmitString(raw_ostream &o, StringRef s) {
+  o << "<string>";
+  for (StringRef::const_iterator I = s.begin(), E = s.end(); I != E; ++I) {
+    char c = *I;
+    switch (c) {
+    default:   o << c; break;
+    case '&':  o << "&amp;"; break;
+    case '<':  o << "&lt;"; break;
+    case '>':  o << "&gt;"; break;
+    case '\'': o << "&apos;"; break;
+    case '\"': o << "&quot;"; break;
+    }
+  }
+  o << "</string>";
+  return o;
+}
+
+static void ReportControlFlow(raw_ostream &o,
+                              const PathDiagnosticControlFlowPiece& P,
+                              const FIDMap& FM,
+                              const SourceManager &SM,
+                              const LangOptions &LangOpts,
+                              unsigned indent) {
+
+  Indent(o, indent) << "<dict>\n";
+  ++indent;
+
+  Indent(o, indent) << "<key>kind</key><string>control</string>\n";
+
+  // Emit edges.
+  Indent(o, indent) << "<key>edges</key>\n";
+  ++indent;
+  Indent(o, indent) << "<array>\n";
+  ++indent;
+  for (PathDiagnosticControlFlowPiece::const_iterator I=P.begin(), E=P.end();
+       I!=E; ++I) {
+    Indent(o, indent) << "<dict>\n";
+    ++indent;
+
+    // Make the ranges of the start and end point self-consistent with
+    // adjacent edges by forcing to use only the beginning of the range.
+    // This simplifies the layout logic for clients.
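+    // e.g. an edge endpoint whose range spans columns 5-20 is emitted as the
+    // single point at column 5.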
+    Indent(o, indent) << "<key>start</key>\n";
+    SourceLocation StartEdge = I->getStart().asRange().getBegin();
+    EmitRange(o, SM, LangOpts, SourceRange(StartEdge, StartEdge), FM, indent+1);
+
+    Indent(o, indent) << "<key>end</key>\n";
+    SourceLocation EndEdge = I->getEnd().asRange().getBegin();
+    EmitRange(o, SM, LangOpts, SourceRange(EndEdge, EndEdge), FM, indent+1);
+
+    --indent;
+    Indent(o, indent) << "</dict>\n";
+  }
+  --indent;
+  Indent(o, indent) << "</array>\n";
+  --indent;
+
+  // Output any helper text.
+  const std::string& s = P.getString();
+  if (!s.empty()) {
+    Indent(o, indent) << "<key>alternate</key>";
+    EmitString(o, s) << '\n';
+  }
+
+  --indent;
+  Indent(o, indent) << "</dict>\n";
+}
+
+static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P,
+                        const FIDMap& FM,
+                        const SourceManager &SM,
+                        const LangOptions &LangOpts,
+                        unsigned indent,
+                        unsigned depth) {
+
+  Indent(o, indent) << "<dict>\n";
+  ++indent;
+
+  Indent(o, indent) << "<key>kind</key><string>event</string>\n";
+
+  // Output the location.
+  FullSourceLoc L = P.getLocation().asLocation();
+
+  Indent(o, indent) << "<key>location</key>\n";
+  EmitLocation(o, SM, LangOpts, L, FM, indent);
+
+  // Output the ranges (if any).
+  ArrayRef<SourceRange> Ranges = P.getRanges();
+
+  if (!Ranges.empty()) {
+    Indent(o, indent) << "<key>ranges</key>\n";
+    Indent(o, indent) << "<array>\n";
+    ++indent;
+    for (ArrayRef<SourceRange>::iterator I = Ranges.begin(), E = Ranges.end();
+         I != E; ++I) {
+      EmitRange(o, SM, LangOpts, *I, FM, indent+1);
+    }
+    --indent;
+    Indent(o, indent) << "</array>\n";
+  }
+  
+  // Output the call depth.
+  Indent(o, indent) << "<key>depth</key>"
+                    << "<integer>" << depth << "</integer>\n";
+
+  // Output the text.
+  assert(!P.getString().empty());
+  Indent(o, indent) << "<key>extended_message</key>\n";
+  Indent(o, indent);
+  EmitString(o, P.getString()) << '\n';
+
+  // Output the short text.
+  // FIXME: Really use a short string.
+  Indent(o, indent) << "<key>message</key>\n";
+  Indent(o, indent);
+  EmitString(o, P.getString()) << '\n';
+  
+  // Finish up.
+  --indent;
+  Indent(o, indent); o << "</dict>\n";
+}
+
+static void ReportPiece(raw_ostream &o,
+                        const PathDiagnosticPiece &P,
+                        const FIDMap& FM, const SourceManager &SM,
+                        const LangOptions &LangOpts,
+                        unsigned indent,
+                        unsigned depth,
+                        bool includeControlFlow);
+
+static void ReportCall(raw_ostream &o,
+                       const PathDiagnosticCallPiece &P,
+                       const FIDMap& FM, const SourceManager &SM,
+                       const LangOptions &LangOpts,
+                       unsigned indent,
+                       unsigned depth) {
+  
+  IntrusiveRefCntPtr<PathDiagnosticEventPiece> callEnter =
+    P.getCallEnterEvent();  
+
+  if (callEnter)
+    ReportPiece(o, *callEnter, FM, SM, LangOpts, indent, depth, true);
+
+  IntrusiveRefCntPtr<PathDiagnosticEventPiece> callEnterWithinCaller =
+    P.getCallEnterWithinCallerEvent();
+  
+  ++depth;
+  
+  if (callEnterWithinCaller)
+    ReportPiece(o, *callEnterWithinCaller, FM, SM, LangOpts,
+                indent, depth, true);
+  
+  for (PathPieces::const_iterator I = P.path.begin(), E = P.path.end();I!=E;++I)
+    ReportPiece(o, **I, FM, SM, LangOpts, indent, depth, true);
+
+  --depth;
+  
+  IntrusiveRefCntPtr<PathDiagnosticEventPiece> callExit =
+    P.getCallExitEvent();
+
+  if (callExit)
+    ReportPiece(o, *callExit, FM, SM, LangOpts, indent, depth, true);
+}
+
+static void ReportMacro(raw_ostream &o,
+                        const PathDiagnosticMacroPiece& P,
+                        const FIDMap& FM, const SourceManager &SM,
+                        const LangOptions &LangOpts,
+                        unsigned indent,
+                        unsigned depth) {
+
+  for (PathPieces::const_iterator I = P.subPieces.begin(), E=P.subPieces.end();
+       I!=E; ++I) {
+    ReportPiece(o, **I, FM, SM, LangOpts, indent, depth, false);
+  }
+}
+
+static void ReportDiag(raw_ostream &o, const PathDiagnosticPiece& P,
+                       const FIDMap& FM, const SourceManager &SM,
+                       const LangOptions &LangOpts) {
+  ReportPiece(o, P, FM, SM, LangOpts, 4, 0, true);
+}
+
+static void ReportPiece(raw_ostream &o,
+                        const PathDiagnosticPiece &P,
+                        const FIDMap& FM, const SourceManager &SM,
+                        const LangOptions &LangOpts,
+                        unsigned indent,
+                        unsigned depth,
+                        bool includeControlFlow) {
+  switch (P.getKind()) {
+    case PathDiagnosticPiece::ControlFlow:
+      if (includeControlFlow)
+        ReportControlFlow(o, cast<PathDiagnosticControlFlowPiece>(P), FM, SM,
+                          LangOpts, indent);
+      break;
+    case PathDiagnosticPiece::Call:
+      ReportCall(o, cast<PathDiagnosticCallPiece>(P), FM, SM, LangOpts,
+                 indent, depth);
+      break;
+    case PathDiagnosticPiece::Event:
+      ReportEvent(o, cast<PathDiagnosticSpotPiece>(P), FM, SM, LangOpts,
+                  indent, depth);
+      break;
+    case PathDiagnosticPiece::Macro:
+      ReportMacro(o, cast<PathDiagnosticMacroPiece>(P), FM, SM, LangOpts,
+                  indent, depth);
+      break;
+  }
+}
+
+void PlistDiagnostics::FlushDiagnosticsImpl(
+                                    std::vector<const PathDiagnostic *> &Diags,
+                                    FilesMade *filesMade) {
+  // Build up a set of FIDs that we use by scanning the locations and
+  // ranges of the diagnostics.
+  FIDMap FM;
+  SmallVector<FileID, 10> Fids;
+  const SourceManager* SM = 0;
+
+  if (!Diags.empty())
+    SM = &(*(*Diags.begin())->path.begin())->getLocation().getManager();
+
+  
+  for (std::vector<const PathDiagnostic*>::iterator DI = Diags.begin(),
+       DE = Diags.end(); DI != DE; ++DI) {
+
+    const PathDiagnostic *D = *DI;
+
+    SmallVector<const PathPieces *, 5> WorkList;
+    WorkList.push_back(&D->path);
+
+    while (!WorkList.empty()) {
+      const PathPieces &path = *WorkList.back();
+      WorkList.pop_back();
+    
+      for (PathPieces::const_iterator I = path.begin(), E = path.end();
+           I!=E; ++I) {
+        const PathDiagnosticPiece *piece = I->getPtr();
+        AddFID(FM, Fids, SM, piece->getLocation().asLocation());
+        ArrayRef<SourceRange> Ranges = piece->getRanges();
+        for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
+                                             E = Ranges.end(); I != E; ++I) {
+          AddFID(FM, Fids, SM, I->getBegin());
+          AddFID(FM, Fids, SM, I->getEnd());
+        }
+
+        if (const PathDiagnosticCallPiece *call =
+            dyn_cast<PathDiagnosticCallPiece>(piece)) {
+          IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+            callEnterWithin = call->getCallEnterWithinCallerEvent();
+          if (callEnterWithin)
+            AddFID(FM, Fids, SM, callEnterWithin->getLocation().asLocation());
+
+          WorkList.push_back(&call->path);
+        }
+        else if (const PathDiagnosticMacroPiece *macro =
+                 dyn_cast<PathDiagnosticMacroPiece>(piece)) {
+          WorkList.push_back(&macro->subPieces);
+        }
+      }
+    }
+  }
+
+  // Open the file.
+  std::string ErrMsg;
+  llvm::raw_fd_ostream o(OutputFile.c_str(), ErrMsg);
+  if (!ErrMsg.empty()) {
+    llvm::errs() << "warning: could not create file: " << OutputFile << '\n';
+    return;
+  }
+
+  // Write the plist header.
+  o << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+  "<!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\" "
+  "\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"
+  "<plist version=\"1.0\">\n";
+
+  // Write the root object: a <dict> containing...
+  //  - "clang_version", the string representation of clang version
+  //  - "files", an <array> mapping from FIDs to file names
+  //  - "diagnostics", an <array> containing the path diagnostics
+  o << "<dict>\n" <<
+       " <key>clang_version</key>\n";
+  EmitString(o, getClangFullVersion()) << '\n';
+  o << " <key>files</key>\n"
+       " <array>\n";
+
+  for (SmallVectorImpl<FileID>::iterator I=Fids.begin(), E=Fids.end();
+       I!=E; ++I) {
+    o << "  ";
+    EmitString(o, SM->getFileEntryForID(*I)->getName()) << '\n';
+  }
+
+  o << " </array>\n"
+       " <key>diagnostics</key>\n"
+       " <array>\n";
+
+  for (std::vector<const PathDiagnostic*>::iterator DI=Diags.begin(),
+       DE = Diags.end(); DI!=DE; ++DI) {
+
+    o << "  <dict>\n"
+         "   <key>path</key>\n";
+
+    const PathDiagnostic *D = *DI;
+
+    o << "   <array>\n";
+
+    for (PathPieces::const_iterator I = D->path.begin(), E = D->path.end(); 
+         I != E; ++I)
+      ReportDiag(o, **I, FM, *SM, LangOpts);
+
+    o << "   </array>\n";
+
+    // Output the bug type and bug category.
+    o << "   <key>description</key>";
+    EmitString(o, D->getShortDescription()) << '\n';
+    o << "   <key>category</key>";
+    EmitString(o, D->getCategory()) << '\n';
+    o << "   <key>type</key>";
+    EmitString(o, D->getBugType()) << '\n';
+    
+    // Output information about the semantic context where
+    // the issue occurred.
+    if (const Decl *DeclWithIssue = D->getDeclWithIssue()) {
+      // FIXME: handle blocks, which have no name.
+      if (const NamedDecl *ND = dyn_cast<NamedDecl>(DeclWithIssue)) {
+        StringRef declKind;
+        switch (ND->getKind()) {
+          case Decl::CXXRecord:
+            declKind = "C++ class";
+            break;
+          case Decl::CXXMethod:
+            declKind = "C++ method";
+            break;
+          case Decl::ObjCMethod:
+            declKind = "Objective-C method";
+            break;
+          case Decl::Function:
+            declKind = "function";
+            break;
+          default:
+            break;
+        }
+        if (!declKind.empty()) {
+          const std::string &declName = ND->getDeclName().getAsString();
+          o << "  <key>issue_context_kind</key>";
+          EmitString(o, declKind) << '\n';
+          o << "  <key>issue_context</key>";
+          EmitString(o, declName) << '\n';
+        }
+
+        // Output the bug hash for issue unique-ing. Currently, it's just an
+        // offset from the beginning of the function.
+        if (const Stmt *Body = DeclWithIssue->getBody()) {
+          
+          // If the bug uniqueing location exists, use it for the hash.
+          // For example, this ensures that two leaks reported on the same line
+          // will have different issue_hashes and that the hash will identify
+          // the leak location even after code is added between the allocation
+          // site and the end of scope (leak report location).
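+          // For instance, if the uniqueing function's body begins on line 40
+          // and the uniqueing (or report) location is on line 42, the emitted
+          // issue_hash is the string "2".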
+          PathDiagnosticLocation UPDLoc = D->getUniqueingLoc();
+          if (UPDLoc.isValid()) {
+            FullSourceLoc UL(SM->getExpansionLoc(UPDLoc.asLocation()),
+                             *SM);
+            FullSourceLoc UFunL(SM->getExpansionLoc(
+              D->getUniqueingDecl()->getBody()->getLocStart()), *SM);
+            o << "  <key>issue_hash</key><string>"
+              << UL.getExpansionLineNumber() - UFunL.getExpansionLineNumber()
+              << "</string>\n";
+
+          // Otherwise, use the location on which the bug is reported.
+          } else {
+            FullSourceLoc L(SM->getExpansionLoc(D->getLocation().asLocation()),
+                            *SM);
+            FullSourceLoc FunL(SM->getExpansionLoc(Body->getLocStart()), *SM);
+            o << "  <key>issue_hash</key><string>"
+              << L.getExpansionLineNumber() - FunL.getExpansionLineNumber()
+              << "</string>\n";
+          }
+
+        }
+      }
+    }
+
+    // Output the location of the bug.
+    o << "  <key>location</key>\n";
+    EmitLocation(o, *SM, LangOpts, D->getLocation(), FM, 2);
+
+    // Output the diagnostic to the sub-diagnostic client, if any.
+    if (!filesMade->empty()) {
+      StringRef lastName;
+      PDFileEntry::ConsumerFiles *files = filesMade->getFiles(*D);
+      if (files) {
+        for (PDFileEntry::ConsumerFiles::const_iterator CI = files->begin(),
+                CE = files->end(); CI != CE; ++CI) {
+          StringRef newName = CI->first;
+          if (newName != lastName) {
+            if (!lastName.empty()) {
+              o << "  </array>\n";
+            }
+            lastName = newName;
+            o <<  "  <key>" << lastName << "_files</key>\n";
+            o << "  <array>\n";
+          }
+          o << "   <string>" << CI->second << "</string>\n";
+        }
+        o << "  </array>\n";
+      }
+    }
+
+    // Close up the entry.
+    o << "  </dict>\n";
+  }
+
+  o << " </array>\n";
+
+  // Finish.
+  o << "</dict>\n</plist>";  
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
new file mode 100644
index 0000000..653b69b
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -0,0 +1,806 @@
+//= ProgramState.cpp - Path-Sensitive "State" for tracking values --*- C++ -*--=
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file implements ProgramState and ProgramStateManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace clang { namespace ento {
+/// Increments the number of times this state is referenced.
+void ProgramStateRetain(const ProgramState *state) {
+  ++const_cast<ProgramState*>(state)->refCount;
+}
+
+/// Decrements the number of times this state is referenced.
+void ProgramStateRelease(const ProgramState *state) {
+  assert(state->refCount > 0);
+  ProgramState *s = const_cast<ProgramState*>(state);
+  if (--s->refCount == 0) {
+    ProgramStateManager &Mgr = s->getStateManager();
+    Mgr.StateSet.RemoveNode(s);
+    s->~ProgramState();    
+    Mgr.freeStates.push_back(s);
+  }
+}
+}}
+
+ProgramState::ProgramState(ProgramStateManager *mgr, const Environment& env,
+                 StoreRef st, GenericDataMap gdm)
+  : stateMgr(mgr),
+    Env(env),
+    store(st.getStore()),
+    GDM(gdm),
+    refCount(0) {
+  stateMgr->getStoreManager().incrementReferenceCount(store);
+}
+
+ProgramState::ProgramState(const ProgramState &RHS)
+    : llvm::FoldingSetNode(),
+      stateMgr(RHS.stateMgr),
+      Env(RHS.Env),
+      store(RHS.store),
+      GDM(RHS.GDM),
+      refCount(0) {
+  stateMgr->getStoreManager().incrementReferenceCount(store);
+}
+
+ProgramState::~ProgramState() {
+  if (store)
+    stateMgr->getStoreManager().decrementReferenceCount(store);
+}
+
+ProgramStateManager::ProgramStateManager(ASTContext &Ctx,
+                                         StoreManagerCreator CreateSMgr,
+                                         ConstraintManagerCreator CreateCMgr,
+                                         llvm::BumpPtrAllocator &alloc,
+                                         SubEngine *SubEng)
+  : Eng(SubEng), EnvMgr(alloc), GDMFactory(alloc),
+    svalBuilder(createSimpleSValBuilder(alloc, Ctx, *this)),
+    CallEventMgr(new CallEventManager(alloc)), Alloc(alloc) {
+  StoreMgr.reset((*CreateSMgr)(*this));
+  ConstraintMgr.reset((*CreateCMgr)(*this, SubEng));
+}
+
+
+ProgramStateManager::~ProgramStateManager() {
+  for (GDMContextsTy::iterator I=GDMContexts.begin(), E=GDMContexts.end();
+       I!=E; ++I)
+    I->second.second(I->second.first);
+}
+
+ProgramStateRef 
+ProgramStateManager::removeDeadBindings(ProgramStateRef state,
+                                   const StackFrameContext *LCtx,
+                                   SymbolReaper& SymReaper) {
+
+  // This code essentially performs a "mark-and-sweep" of the VariableBindings.
+  // The roots are any Block-level exprs and Decls that our liveness algorithm
+  // tells us are live.  We then see what Decls they may reference, and keep
+  // those around.  This code more than likely can be made faster, and the
+  // frequency with which this method is called should be experimented with
+  // for optimum performance.
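+  // e.g. once the liveness analysis reports a local variable dead at this
+  // point, its store binding is removed and its value no longer keeps any
+  // symbols alive.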
+  ProgramState NewState = *state;
+
+  NewState.Env = EnvMgr.removeDeadBindings(NewState.Env, SymReaper, state);
+
+  // Clean up the store.
+  StoreRef newStore = StoreMgr->removeDeadBindings(NewState.getStore(), LCtx,
+                                                   SymReaper);
+  NewState.setStore(newStore);
+  SymReaper.setReapedStore(newStore);
+
+  ProgramStateRef Result = getPersistentState(NewState);
+  return ConstraintMgr->removeDeadBindings(Result, SymReaper);
+}
+
+ProgramStateRef ProgramState::bindLoc(Loc LV, SVal V, bool notifyChanges) const {
+  ProgramStateManager &Mgr = getStateManager();
+  ProgramStateRef newState = makeWithStore(Mgr.StoreMgr->Bind(getStore(), 
+                                                             LV, V));
+  const MemRegion *MR = LV.getAsRegion();
+  if (MR && Mgr.getOwningEngine() && notifyChanges)
+    return Mgr.getOwningEngine()->processRegionChange(newState, MR);
+
+  return newState;
+}
+
+ProgramStateRef ProgramState::bindDefault(SVal loc, SVal V) const {
+  ProgramStateManager &Mgr = getStateManager();
+  const MemRegion *R = loc.castAs<loc::MemRegionVal>().getRegion();
+  const StoreRef &newStore = Mgr.StoreMgr->BindDefault(getStore(), R, V);
+  ProgramStateRef new_state = makeWithStore(newStore);
+  return Mgr.getOwningEngine() ? 
+           Mgr.getOwningEngine()->processRegionChange(new_state, R) : 
+           new_state;
+}
+
+typedef ArrayRef<const MemRegion *> RegionList;
+typedef ArrayRef<SVal> ValueList;
+
+ProgramStateRef 
+ProgramState::invalidateRegions(RegionList Regions,
+                                const Expr *E, unsigned Count,
+                                const LocationContext *LCtx,
+                                bool CausedByPointerEscape,
+                                InvalidatedSymbols *IS,
+                                const CallEvent *Call,
+                                RegionList ConstRegions) const {
+  SmallVector<SVal, 8> Values;
+  for (RegionList::const_iterator I = Regions.begin(),
+                                  End = Regions.end(); I != End; ++I)
+    Values.push_back(loc::MemRegionVal(*I));
+
+  SmallVector<SVal, 8> ConstValues;
+  for (RegionList::const_iterator I = ConstRegions.begin(),
+                                  End = ConstRegions.end(); I != End; ++I)
+    ConstValues.push_back(loc::MemRegionVal(*I));
+
+  if (!IS) {
+    InvalidatedSymbols invalidated;
+    return invalidateRegionsImpl(Values, E, Count, LCtx,
+                                 CausedByPointerEscape,
+                                 invalidated, Call, ConstValues);
+  }
+  return invalidateRegionsImpl(Values, E, Count, LCtx, CausedByPointerEscape,
+                               *IS, Call, ConstValues);
+}
+
+ProgramStateRef
+ProgramState::invalidateRegions(ValueList Values,
+                                const Expr *E, unsigned Count,
+                                const LocationContext *LCtx,
+                                bool CausedByPointerEscape,
+                                InvalidatedSymbols *IS,
+                                const CallEvent *Call,
+                                ValueList ConstValues) const {
+  if (!IS) {
+    InvalidatedSymbols invalidated;
+    return invalidateRegionsImpl(Values, E, Count, LCtx,
+                                 CausedByPointerEscape,
+                                 invalidated, Call, ConstValues);
+  }
+  return invalidateRegionsImpl(Values, E, Count, LCtx, CausedByPointerEscape,
+                               *IS, Call, ConstValues);
+}
+
+ProgramStateRef
+ProgramState::invalidateRegionsImpl(ValueList Values,
+                                    const Expr *E, unsigned Count,
+                                    const LocationContext *LCtx,
+                                    bool CausedByPointerEscape,
+                                    InvalidatedSymbols &IS,
+                                    const CallEvent *Call,
+                                    ValueList ConstValues) const {
+  ProgramStateManager &Mgr = getStateManager();
+  SubEngine* Eng = Mgr.getOwningEngine();
+  InvalidatedSymbols ConstIS;
+
+  if (Eng) {
+    StoreManager::InvalidatedRegions TopLevelInvalidated;
+    StoreManager::InvalidatedRegions TopLevelConstInvalidated;
+    StoreManager::InvalidatedRegions Invalidated;
+    const StoreRef &newStore
+    = Mgr.StoreMgr->invalidateRegions(getStore(), Values, ConstValues,
+                                      E, Count, LCtx, Call,
+                                      IS, ConstIS,
+                                      &TopLevelInvalidated,
+                                      &TopLevelConstInvalidated,
+                                      &Invalidated);
+
+    ProgramStateRef newState = makeWithStore(newStore);
+
+    if (CausedByPointerEscape) {
+      newState = Eng->notifyCheckersOfPointerEscape(newState, &IS,
+                                                    TopLevelInvalidated,
+                                                    Invalidated, Call);
+      if (!ConstValues.empty()) {
+        StoreManager::InvalidatedRegions Empty;
+        newState = Eng->notifyCheckersOfPointerEscape(newState, &ConstIS,
+                                                      TopLevelConstInvalidated,
+                                                      Empty, Call,
+                                                      true);
+      }
+    }
+
+    return Eng->processRegionChanges(newState, &IS,
+                                     TopLevelInvalidated, Invalidated,
+                                     Call);
+  }
+
+  const StoreRef &newStore =
+  Mgr.StoreMgr->invalidateRegions(getStore(), Values, ConstValues,
+                                  E, Count, LCtx, Call,
+                                  IS, ConstIS, NULL, NULL, NULL);
+  return makeWithStore(newStore);
+}
+
+ProgramStateRef ProgramState::killBinding(Loc LV) const {
+  assert(!LV.getAs<loc::MemRegionVal>() && "Use invalidateRegion instead.");
+
+  Store OldStore = getStore();
+  const StoreRef &newStore =
+    getStateManager().StoreMgr->killBinding(OldStore, LV);
+
+  if (newStore.getStore() == OldStore)
+    return this;
+
+  return makeWithStore(newStore);
+}
+
+ProgramStateRef 
+ProgramState::enterStackFrame(const CallEvent &Call,
+                              const StackFrameContext *CalleeCtx) const {
+  const StoreRef &NewStore =
+    getStateManager().StoreMgr->enterStackFrame(getStore(), Call, CalleeCtx);
+  return makeWithStore(NewStore);
+}
+
+SVal ProgramState::getSValAsScalarOrLoc(const MemRegion *R) const {
+  // We only want to do fetches from regions to which we can actually bind
+  // values.  For example, SymbolicRegions of type 'id<...>' cannot
+  // have direct bindings (but there can be bindings on their subregions).
+  if (!R->isBoundable())
+    return UnknownVal();
+
+  if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
+    QualType T = TR->getValueType();
+    if (Loc::isLocType(T) || T->isIntegralOrEnumerationType())
+      return getSVal(R);
+  }
+
+  return UnknownVal();
+}
+
+SVal ProgramState::getSVal(Loc location, QualType T) const {
+  SVal V = getRawSVal(cast<Loc>(location), T);
+
+  // If 'V' is a symbolic value that is *perfectly* constrained to
+  // be a constant value, use that value instead to lessen the burden
+  // on later analysis stages (so we have fewer symbolic values to reason
+  // about).
+  if (!T.isNull()) {
+    if (SymbolRef sym = V.getAsSymbol()) {
+      if (const llvm::APSInt *Int = getStateManager()
+                                    .getConstraintManager()
+                                    .getSymVal(this, sym)) {
+        // FIXME: Because we don't correctly model (yet) sign-extension
+        // and truncation of symbolic values, we need to convert
+        // the integer value to the correct signedness and bitwidth.
+        //
+        // This shows up in the following:
+        //
+        //   char foo();
+        //   unsigned x = foo();
+        //   if (x == 54)
+        //     ...
+        //
+        //  The symbolic value stored to 'x' is actually the conjured
+        //  symbol for the call to foo(); the type of that symbol is 'char',
+        //  not unsigned.
+        const llvm::APSInt &NewV = getBasicVals().Convert(T, *Int);
+        
+        if (V.getAs<Loc>())
+          return loc::ConcreteInt(NewV);
+        else
+          return nonloc::ConcreteInt(NewV);
+      }
+    }
+  }
+  
+  return V;
+}
+
+ProgramStateRef ProgramState::BindExpr(const Stmt *S,
+                                           const LocationContext *LCtx,
+                                           SVal V, bool Invalidate) const{
+  Environment NewEnv =
+    getStateManager().EnvMgr.bindExpr(Env, EnvironmentEntry(S, LCtx), V,
+                                      Invalidate);
+  if (NewEnv == Env)
+    return this;
+
+  ProgramState NewSt = *this;
+  NewSt.Env = NewEnv;
+  return getStateManager().getPersistentState(NewSt);
+}
+
+ProgramStateRef ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
+                                      DefinedOrUnknownSVal UpperBound,
+                                      bool Assumption,
+                                      QualType indexTy) const {
+  if (Idx.isUnknown() || UpperBound.isUnknown())
+    return this;
+
+  // Build an expression for 0 <= Idx < UpperBound.
+  // This is the same as Idx + MIN < UpperBound + MIN, if overflow is allowed.
+  // FIXME: This should probably be part of SValBuilder.
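+  // Worked example (illustrative, assuming an 8-bit signed index type with
+  // wraparound): MIN = -128.  For Idx = 5, UpperBound = 10:
+  // 5 + MIN = -123 < -118 = 10 + MIN, so the index is in bounds.  For
+  // Idx = -1: -1 + MIN wraps to 127, which is not less than -118, so the
+  // check fails -- the lower bound comes for free from the wraparound.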
+  ProgramStateManager &SM = getStateManager();
+  SValBuilder &svalBuilder = SM.getSValBuilder();
+  ASTContext &Ctx = svalBuilder.getContext();
+
+  // Get the offset: the minimum value of the array index type.
+  BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
+  // FIXME: This should be using ValueManager::ArrayindexTy...somehow.
+  if (indexTy.isNull())
+    indexTy = Ctx.IntTy;
+  nonloc::ConcreteInt Min(BVF.getMinValue(indexTy));
+
+  // Adjust the index.
+  SVal newIdx = svalBuilder.evalBinOpNN(this, BO_Add,
+                                        Idx.castAs<NonLoc>(), Min, indexTy);
+  if (newIdx.isUnknownOrUndef())
+    return this;
+
+  // Adjust the upper bound.
+  SVal newBound =
+    svalBuilder.evalBinOpNN(this, BO_Add, UpperBound.castAs<NonLoc>(),
+                            Min, indexTy);
+
+  if (newBound.isUnknownOrUndef())
+    return this;
+
+  // Build the actual comparison.
+  SVal inBound = svalBuilder.evalBinOpNN(this, BO_LT, newIdx.castAs<NonLoc>(),
+                                         newBound.castAs<NonLoc>(), Ctx.IntTy);
+  if (inBound.isUnknownOrUndef())
+    return this;
+
+  // Finally, let the constraint manager take care of it.
+  ConstraintManager &CM = SM.getConstraintManager();
+  return CM.assume(this, inBound.castAs<DefinedSVal>(), Assumption);
+}
+
+ConditionTruthVal ProgramState::isNull(SVal V) const {
+  if (V.isZeroConstant())
+    return true;
+
+  if (V.isConstant())
+    return false;
+  
+  SymbolRef Sym = V.getAsSymbol(/* IncludeBaseRegion */ true);
+  if (!Sym)
+    return ConditionTruthVal();
+  
+  return getStateManager().ConstraintMgr->isNull(this, Sym);
+}
+
+ProgramStateRef ProgramStateManager::getInitialState(const LocationContext *InitLoc) {
+  ProgramState State(this,
+                EnvMgr.getInitialEnvironment(),
+                StoreMgr->getInitialStore(InitLoc),
+                GDMFactory.getEmptyMap());
+
+  return getPersistentState(State);
+}
+
+ProgramStateRef ProgramStateManager::getPersistentStateWithGDM(
+                                                     ProgramStateRef FromState,
+                                                     ProgramStateRef GDMState) {
+  ProgramState NewState(*FromState);
+  NewState.GDM = GDMState->GDM;
+  return getPersistentState(NewState);
+}
+
+ProgramStateRef ProgramStateManager::getPersistentState(ProgramState &State) {
+
+  llvm::FoldingSetNodeID ID;
+  State.Profile(ID);
+  void *InsertPos;
+
+  if (ProgramState *I = StateSet.FindNodeOrInsertPos(ID, InsertPos))
+    return I;
+
+  ProgramState *newState = 0;
+  if (!freeStates.empty()) {
+    newState = freeStates.back();
+    freeStates.pop_back();    
+  }
+  else {
+    newState = (ProgramState*) Alloc.Allocate<ProgramState>();
+  }
+  new (newState) ProgramState(State);
+  StateSet.InsertNode(newState, InsertPos);
+  return newState;
+}
+
+ProgramStateRef ProgramState::makeWithStore(const StoreRef &store) const {
+  ProgramState NewSt(*this);
+  NewSt.setStore(store);
+  return getStateManager().getPersistentState(NewSt);
+}
+
+void ProgramState::setStore(const StoreRef &newStore) {
+  Store newStoreStore = newStore.getStore();
+  if (newStoreStore)
+    stateMgr->getStoreManager().incrementReferenceCount(newStoreStore);
+  if (store)
+    stateMgr->getStoreManager().decrementReferenceCount(store);
+  store = newStoreStore;
+}
+
+//===----------------------------------------------------------------------===//
+//  State pretty-printing.
+//===----------------------------------------------------------------------===//
+
+void ProgramState::print(raw_ostream &Out,
+                         const char *NL, const char *Sep) const {
+  // Print the store.
+  ProgramStateManager &Mgr = getStateManager();
+  Mgr.getStoreManager().print(getStore(), Out, NL, Sep);
+
+  // Print out the environment.
+  Env.print(Out, NL, Sep);
+
+  // Print out the constraints.
+  Mgr.getConstraintManager().print(this, Out, NL, Sep);
+
+  // Print checker-specific data.
+  Mgr.getOwningEngine()->printState(Out, this, NL, Sep);
+}
+
+void ProgramState::printDOT(raw_ostream &Out) const {
+  print(Out, "\\l", "\\|");
+}
+
+void ProgramState::dump() const {
+  print(llvm::errs());
+}
+
+void ProgramState::printTaint(raw_ostream &Out,
+                              const char *NL, const char *Sep) const {
+  TaintMapImpl TM = get<TaintMap>();
+
+  if (!TM.isEmpty())
+    Out <<"Tainted Symbols:" << NL;
+
+  for (TaintMapImpl::iterator I = TM.begin(), E = TM.end(); I != E; ++I) {
+    Out << I->first << " : " << I->second << NL;
+  }
+}
+
+void ProgramState::dumpTaint() const {
+  printTaint(llvm::errs());
+}
+
+//===----------------------------------------------------------------------===//
+// Generic Data Map.
+//===----------------------------------------------------------------------===//
+
+void *const* ProgramState::FindGDM(void *K) const {
+  return GDM.lookup(K);
+}
+
+void*
+ProgramStateManager::FindGDMContext(void *K,
+                               void *(*CreateContext)(llvm::BumpPtrAllocator&),
+                               void (*DeleteContext)(void*)) {
+
+  std::pair<void*, void (*)(void*)>& p = GDMContexts[K];
+  if (!p.first) {
+    p.first = CreateContext(Alloc);
+    p.second = DeleteContext;
+  }
+
+  return p.first;
+}
+
+ProgramStateRef ProgramStateManager::addGDM(ProgramStateRef St, void *Key, void *Data){
+  ProgramState::GenericDataMap M1 = St->getGDM();
+  ProgramState::GenericDataMap M2 = GDMFactory.add(M1, Key, Data);
+
+  if (M1 == M2)
+    return St;
+
+  ProgramState NewSt = *St;
+  NewSt.GDM = M2;
+  return getPersistentState(NewSt);
+}
+
+ProgramStateRef ProgramStateManager::removeGDM(ProgramStateRef state, void *Key) {
+  ProgramState::GenericDataMap OldM = state->getGDM();
+  ProgramState::GenericDataMap NewM = GDMFactory.remove(OldM, Key);
+
+  if (NewM == OldM)
+    return state;
+
+  ProgramState NewState = *state;
+  NewState.GDM = NewM;
+  return getPersistentState(NewState);
+}
+
+bool ScanReachableSymbols::scan(nonloc::CompoundVal val) {
+  for (nonloc::CompoundVal::iterator I=val.begin(), E=val.end(); I!=E; ++I)
+    if (!scan(*I))
+      return false;
+
+  return true;
+}
+
+bool ScanReachableSymbols::scan(const SymExpr *sym) {
+  unsigned &isVisited = visited[sym];
+  if (isVisited)
+    return true;
+  isVisited = 1;
+  
+  if (!visitor.VisitSymbol(sym))
+    return false;
+  
+  // TODO: should be rewritten using SymExpr::symbol_iterator.
+  switch (sym->getKind()) {
+    case SymExpr::RegionValueKind:
+    case SymExpr::ConjuredKind:
+    case SymExpr::DerivedKind:
+    case SymExpr::ExtentKind:
+    case SymExpr::MetadataKind:
+      break;
+    case SymExpr::CastSymbolKind:
+      return scan(cast<SymbolCast>(sym)->getOperand());
+    case SymExpr::SymIntKind:
+      return scan(cast<SymIntExpr>(sym)->getLHS());
+    case SymExpr::IntSymKind:
+      return scan(cast<IntSymExpr>(sym)->getRHS());
+    case SymExpr::SymSymKind: {
+      const SymSymExpr *x = cast<SymSymExpr>(sym);
+      return scan(x->getLHS()) && scan(x->getRHS());
+    }
+  }
+  return true;
+}
+
+bool ScanReachableSymbols::scan(SVal val) {
+  if (Optional<loc::MemRegionVal> X = val.getAs<loc::MemRegionVal>())
+    return scan(X->getRegion());
+
+  if (Optional<nonloc::LazyCompoundVal> X =
+          val.getAs<nonloc::LazyCompoundVal>()) {
+    StoreManager &StoreMgr = state->getStateManager().getStoreManager();
+    // FIXME: We don't really want to use getBaseRegion() here because pointer
+    // arithmetic doesn't apply, but scanReachableSymbols only accepts base
+    // regions right now.
+    if (!StoreMgr.scanReachableSymbols(X->getStore(),
+                                       X->getRegion()->getBaseRegion(),
+                                       *this))
+      return false;
+  }
+
+  if (Optional<nonloc::LocAsInteger> X = val.getAs<nonloc::LocAsInteger>())
+    return scan(X->getLoc());
+
+  if (SymbolRef Sym = val.getAsSymbol())
+    return scan(Sym);
+
+  if (const SymExpr *Sym = val.getAsSymbolicExpression())
+    return scan(Sym);
+
+  if (Optional<nonloc::CompoundVal> X = val.getAs<nonloc::CompoundVal>())
+    return scan(*X);
+
+  return true;
+}
+
+bool ScanReachableSymbols::scan(const MemRegion *R) {
+  if (isa<MemSpaceRegion>(R))
+    return true;
+  
+  unsigned &isVisited = visited[R];
+  if (isVisited)
+    return true;
+  isVisited = 1;
+  
+  
+  if (!visitor.VisitMemRegion(R))
+    return false;
+
+  // If this is a symbolic region, visit the symbol for the region.
+  if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+    if (!visitor.VisitSymbol(SR->getSymbol()))
+      return false;
+
+  // If this is a subregion, also visit the parent regions.
+  if (const SubRegion *SR = dyn_cast<SubRegion>(R)) {
+    const MemRegion *Super = SR->getSuperRegion();
+    if (!scan(Super))
+      return false;
+
+    // When we reach the topmost region, scan all symbols in it.
+    if (isa<MemSpaceRegion>(Super)) {
+      StoreManager &StoreMgr = state->getStateManager().getStoreManager();
+      if (!StoreMgr.scanReachableSymbols(state->getStore(), SR, *this))
+        return false;
+    }
+  }
+
+  // Regions captured by a block are also implicitly reachable.
+  if (const BlockDataRegion *BDR = dyn_cast<BlockDataRegion>(R)) {
+    BlockDataRegion::referenced_vars_iterator I = BDR->referenced_vars_begin(),
+                                              E = BDR->referenced_vars_end();
+    for ( ; I != E; ++I) {
+      if (!scan(I.getCapturedRegion()))
+        return false;
+    }
+  }
+
+  return true;
+}
+
+bool ProgramState::scanReachableSymbols(SVal val, SymbolVisitor& visitor) const {
+  ScanReachableSymbols S(this, visitor);
+  return S.scan(val);
+}
+
+bool ProgramState::scanReachableSymbols(const SVal *I, const SVal *E,
+                                   SymbolVisitor &visitor) const {
+  ScanReachableSymbols S(this, visitor);
+  for ( ; I != E; ++I) {
+    if (!S.scan(*I))
+      return false;
+  }
+  return true;
+}
+
+bool ProgramState::scanReachableSymbols(const MemRegion * const *I,
+                                   const MemRegion * const *E,
+                                   SymbolVisitor &visitor) const {
+  ScanReachableSymbols S(this, visitor);
+  for ( ; I != E; ++I) {
+    if (!S.scan(*I))
+      return false;
+  }
+  return true;
+}
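+
+// Illustrative sketch (not part of the upstream file): a caller collects every
+// reachable symbol by subclassing SymbolVisitor and returning true to keep
+// scanning; 'SymbolCollector' and 'V' are hypothetical names.
+//
+//   class SymbolCollector : public SymbolVisitor {
+//     llvm::SmallPtrSet<SymbolRef, 8> &Syms;
+//   public:
+//     SymbolCollector(llvm::SmallPtrSet<SymbolRef, 8> &S) : Syms(S) {}
+//     bool VisitSymbol(SymbolRef Sym) { Syms.insert(Sym); return true; }
+//   };
+//
+//   llvm::SmallPtrSet<SymbolRef, 8> Reachable;
+//   SymbolCollector Collector(Reachable);
+//   State->scanReachableSymbols(V, Collector);  // V is some SVal of interest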
+
+ProgramStateRef ProgramState::addTaint(const Stmt *S,
+                                           const LocationContext *LCtx,
+                                           TaintTagType Kind) const {
+  if (const Expr *E = dyn_cast_or_null<Expr>(S))
+    S = E->IgnoreParens();
+
+  SymbolRef Sym = getSVal(S, LCtx).getAsSymbol();
+  if (Sym)
+    return addTaint(Sym, Kind);
+
+  // Otherwise, fall back to the region the value refers to; the MemRegion
+  // overload returns the state unchanged when taint cannot be attached.
+  const MemRegion *R = getSVal(S, LCtx).getAsRegion();
+  return addTaint(R, Kind);
+}
+
+ProgramStateRef ProgramState::addTaint(const MemRegion *R,
+                                           TaintTagType Kind) const {
+  if (const SymbolicRegion *SR = dyn_cast_or_null<SymbolicRegion>(R))
+    return addTaint(SR->getSymbol(), Kind);
+  return this;
+}
+
+ProgramStateRef ProgramState::addTaint(SymbolRef Sym,
+                                           TaintTagType Kind) const {
+  // If this is a symbol cast, remove the cast before adding the taint. Taint
+  // is cast agnostic.
+  while (const SymbolCast *SC = dyn_cast<SymbolCast>(Sym))
+    Sym = SC->getOperand();
+
+  ProgramStateRef NewState = set<TaintMap>(Sym, Kind);
+  assert(NewState);
+  return NewState;
+}
+
+bool ProgramState::isTainted(const Stmt *S, const LocationContext *LCtx,
+                             TaintTagType Kind) const {
+  if (const Expr *E = dyn_cast_or_null<Expr>(S))
+    S = E->IgnoreParens();
+
+  SVal val = getSVal(S, LCtx);
+  return isTainted(val, Kind);
+}
+
+bool ProgramState::isTainted(SVal V, TaintTagType Kind) const {
+  if (const SymExpr *Sym = V.getAsSymExpr())
+    return isTainted(Sym, Kind);
+  if (const MemRegion *Reg = V.getAsRegion())
+    return isTainted(Reg, Kind);
+  return false;
+}
+
+bool ProgramState::isTainted(const MemRegion *Reg, TaintTagType K) const {
+  if (!Reg)
+    return false;
+
+  // Element region (array element) is tainted if either the base or the offset
+  // are tainted.
+  if (const ElementRegion *ER = dyn_cast<ElementRegion>(Reg))
+    return isTainted(ER->getSuperRegion(), K) || isTainted(ER->getIndex(), K);
+
+  if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg))
+    return isTainted(SR->getSymbol(), K);
+
+  if (const SubRegion *ER = dyn_cast<SubRegion>(Reg))
+    return isTainted(ER->getSuperRegion(), K);
+
+  return false;
+}
+
+bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const {
+  if (!Sym)
+    return false;
+  
+  // Traverse all the symbols this symbol depends on to see if any are tainted.
+  bool Tainted = false;
+  for (SymExpr::symbol_iterator SI = Sym->symbol_begin(), SE =Sym->symbol_end();
+       SI != SE; ++SI) {
+    if (!isa<SymbolData>(*SI))
+      continue;
+    
+    const TaintTagType *Tag = get<TaintMap>(*SI);
+    Tainted = (Tag && *Tag == Kind);
+
+    // If this is a SymbolDerived with a tainted parent, it's also tainted.
+    if (const SymbolDerived *SD = dyn_cast<SymbolDerived>(*SI))
+      Tainted = Tainted || isTainted(SD->getParentSymbol(), Kind);
+
+    // If the memory region is tainted, the data is also tainted.
+    if (const SymbolRegionValue *SRV = dyn_cast<SymbolRegionValue>(*SI))
+      Tainted = Tainted || isTainted(SRV->getRegion(), Kind);
+
+    // If this is a SymbolCast from a tainted value, it's also tainted.
+    if (const SymbolCast *SC = dyn_cast<SymbolCast>(*SI))
+      Tainted = Tainted || isTainted(SC->getOperand(), Kind);
+
+    if (Tainted)
+      return true;
+  }
+  
+  return Tainted;
+}
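+
+// Illustrative sketch (not part of the upstream file): a taint-aware checker
+// typically marks a symbol after reading untrusted input and queries it at a
+// sink; 'C' is a hypothetical CheckerContext and 'reportTaintedUse' a
+// hypothetical helper.
+//
+//   ProgramStateRef State = C.getState();
+//   State = State->addTaint(Sym, TaintTagGeneric);
+//   C.addTransition(State);
+//   ...
+//   if (C.getState()->isTainted(Sym, TaintTagGeneric))
+//     reportTaintedUse(C);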
+
+/// The GDM component containing the dynamic type info. This is a map from a
+/// memory region to its most likely dynamic type.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicTypeMap,
+                                 CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *,
+                                                             DynamicTypeInfo))
+
+DynamicTypeInfo ProgramState::getDynamicTypeInfo(const MemRegion *Reg) const {
+  Reg = Reg->StripCasts();
+
+  // Look up the dynamic type in the GDM.
+  const DynamicTypeInfo *GDMType = get<DynamicTypeMap>(Reg);
+  if (GDMType)
+    return *GDMType;
+
+  // Otherwise, fall back to what we know about the region.
+  if (const TypedRegion *TR = dyn_cast<TypedRegion>(Reg))
+    return DynamicTypeInfo(TR->getLocationType(), /*CanBeSubclass=*/false);
+
+  if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg)) {
+    SymbolRef Sym = SR->getSymbol();
+    return DynamicTypeInfo(Sym->getType());
+  }
+
+  return DynamicTypeInfo();
+}
+
+ProgramStateRef ProgramState::setDynamicTypeInfo(const MemRegion *Reg,
+                                                 DynamicTypeInfo NewTy) const {
+  Reg = Reg->StripCasts();
+  ProgramStateRef NewState = set<DynamicTypeMap>(Reg, NewTy);
+  assert(NewState);
+  return NewState;
+}
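+
+// Illustrative sketch (not part of the upstream file): a checker that sees a
+// C++ 'new Derived' allocation could pin the region's dynamic type here and
+// rely on the fallback logic in getDynamicTypeInfo() everywhere else; 'Reg'
+// and 'DerivedTy' are hypothetical values.
+//
+//   State = State->setDynamicTypeInfo(Reg,
+//                    DynamicTypeInfo(DerivedTy, /*CanBeSubclass=*/false));
+//   DynamicTypeInfo DTI = State->getDynamicTypeInfo(Reg);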
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
new file mode 100644
index 0000000..3606e09
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -0,0 +1,587 @@
+//== RangeConstraintManager.cpp - Manage range constraints.------*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines RangeConstraintManager, a class that tracks simple
+//  equality and inequality constraints on symbolic values of ProgramState.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SimpleConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+/// A Range represents the closed range [from, to].  The caller must
+/// guarantee that from <= to.  Note that Range is immutable, so as not
+/// to subvert RangeSet's immutability.
+namespace {
+class Range : public std::pair<const llvm::APSInt*,
+                                                const llvm::APSInt*> {
+public:
+  Range(const llvm::APSInt &from, const llvm::APSInt &to)
+    : std::pair<const llvm::APSInt*, const llvm::APSInt*>(&from, &to) {
+    assert(from <= to);
+  }
+  bool Includes(const llvm::APSInt &v) const {
+    return *first <= v && v <= *second;
+  }
+  const llvm::APSInt &From() const {
+    return *first;
+  }
+  const llvm::APSInt &To() const {
+    return *second;
+  }
+  const llvm::APSInt *getConcreteValue() const {
+    return &From() == &To() ? &From() : NULL;
+  }
+
+  void Profile(llvm::FoldingSetNodeID &ID) const {
+    ID.AddPointer(&From());
+    ID.AddPointer(&To());
+  }
+};
+
+
+class RangeTrait : public llvm::ImutContainerInfo<Range> {
+public:
+  // When comparing if one Range is less than another, we should compare
+  // the actual APSInt values instead of their pointers.  This keeps the order
+  // consistent (instead of comparing by pointer values) and can potentially
+  // be used to speed up some of the operations in RangeSet.
+  static inline bool isLess(key_type_ref lhs, key_type_ref rhs) {
+    return *lhs.first < *rhs.first || (!(*rhs.first < *lhs.first) &&
+                                       *lhs.second < *rhs.second);
+  }
+};
+
+/// RangeSet contains a set of ranges. If the set is empty, then the value
+///  of a symbol is overconstrained and there are no possible values for
+///  that symbol.
+class RangeSet {
+  typedef llvm::ImmutableSet<Range, RangeTrait> PrimRangeSet;
+  PrimRangeSet ranges; // no need to make const, since it is an
+                       // ImmutableSet - this allows default operator=
+                       // to work.
+public:
+  typedef PrimRangeSet::Factory Factory;
+  typedef PrimRangeSet::iterator iterator;
+
+  RangeSet(PrimRangeSet RS) : ranges(RS) {}
+
+  iterator begin() const { return ranges.begin(); }
+  iterator end() const { return ranges.end(); }
+
+  bool isEmpty() const { return ranges.isEmpty(); }
+
+  /// Construct a new RangeSet representing '{ [from, to] }'.
+  RangeSet(Factory &F, const llvm::APSInt &from, const llvm::APSInt &to)
+    : ranges(F.add(F.getEmptySet(), Range(from, to))) {}
+
+  /// Profile - Generates a hash profile of this RangeSet for use
+  ///  by FoldingSet.
+  void Profile(llvm::FoldingSetNodeID &ID) const { ranges.Profile(ID); }
+
+  /// getConcreteValue - If a symbol is constrained to equal a specific integer
+  ///  constant then this method returns that value.  Otherwise, it returns
+  ///  NULL.
+  const llvm::APSInt* getConcreteValue() const {
+    return ranges.isSingleton() ? ranges.begin()->getConcreteValue() : 0;
+  }
+
+private:
+  void IntersectInRange(BasicValueFactory &BV, Factory &F,
+                        const llvm::APSInt &Lower,
+                        const llvm::APSInt &Upper,
+                        PrimRangeSet &newRanges,
+                        PrimRangeSet::iterator &i,
+                        PrimRangeSet::iterator &e) const {
+    // There are six cases for each range R in the set:
+    //   1. R is entirely before the intersection range.
+    //   2. R is entirely after the intersection range.
+    //   3. R contains the entire intersection range.
+    //   4. R starts before the intersection range and ends in the middle.
+    //   5. R starts in the middle of the intersection range and ends after it.
+    //   6. R is entirely contained in the intersection range.
+    // These correspond to each of the conditions below.
+    for (/* i = begin(), e = end() */; i != e; ++i) {
+      if (i->To() < Lower) {
+        continue;
+      }
+      if (i->From() > Upper) {
+        break;
+      }
+
+      if (i->Includes(Lower)) {
+        if (i->Includes(Upper)) {
+          newRanges = F.add(newRanges, Range(BV.getValue(Lower),
+                                             BV.getValue(Upper)));
+          break;
+        } else
+          newRanges = F.add(newRanges, Range(BV.getValue(Lower), i->To()));
+      } else {
+        if (i->Includes(Upper)) {
+          newRanges = F.add(newRanges, Range(i->From(), BV.getValue(Upper)));
+          break;
+        } else
+          newRanges = F.add(newRanges, *i);
+      }
+    }
+  }
+
+  const llvm::APSInt &getMinValue() const {
+    assert(!isEmpty());
+    return ranges.begin()->From();
+  }
+
+  bool pin(llvm::APSInt &Lower, llvm::APSInt &Upper) const {
+    // This function has nine cases, the cartesian product of range-testing
+    // both the upper and lower bounds against the symbol's type.
+    // Each case requires a different pinning operation.
+    // The function returns false if the described range is entirely outside
+    // the range of values for the associated symbol.
+    APSIntType Type(getMinValue());
+    APSIntType::RangeTestResultKind LowerTest = Type.testInRange(Lower, true);
+    APSIntType::RangeTestResultKind UpperTest = Type.testInRange(Upper, true);
+
+    switch (LowerTest) {
+    case APSIntType::RTR_Below:
+      switch (UpperTest) {
+      case APSIntType::RTR_Below:
+        // The entire range is outside the symbol's set of possible values.
+        // If this is a conventionally-ordered range, the state is infeasible.
+        if (Lower < Upper)
+          return false;
+
+        // However, if the range wraps around, it spans all possible values.
+        Lower = Type.getMinValue();
+        Upper = Type.getMaxValue();
+        break;
+      case APSIntType::RTR_Within:
+        // The range starts below what's possible but ends within it. Pin.
+        Lower = Type.getMinValue();
+        Type.apply(Upper);
+        break;
+      case APSIntType::RTR_Above:
+        // The range spans all possible values for the symbol. Pin.
+        Lower = Type.getMinValue();
+        Upper = Type.getMaxValue();
+        break;
+      }
+      break;
+    case APSIntType::RTR_Within:
+      switch (UpperTest) {
+      case APSIntType::RTR_Below:
+        // The range wraps around, but all lower values are not possible.
+        Type.apply(Lower);
+        Upper = Type.getMaxValue();
+        break;
+      case APSIntType::RTR_Within:
+        // The range may or may not wrap around, but both limits are valid.
+        Type.apply(Lower);
+        Type.apply(Upper);
+        break;
+      case APSIntType::RTR_Above:
+        // The range starts within what's possible but ends above it. Pin.
+        Type.apply(Lower);
+        Upper = Type.getMaxValue();
+        break;
+      }
+      break;
+    case APSIntType::RTR_Above:
+      switch (UpperTest) {
+      case APSIntType::RTR_Below:
+        // The range wraps but is outside the symbol's set of possible values.
+        return false;
+      case APSIntType::RTR_Within:
+        // The range starts above what's possible but ends within it (wrap).
+        Lower = Type.getMinValue();
+        Type.apply(Upper);
+        break;
+      case APSIntType::RTR_Above:
+        // The entire range is outside the symbol's set of possible values.
+        // If this is a conventionally-ordered range, the state is infeasible.
+        if (Lower < Upper)
+          return false;
+
+        // However, if the range wraps around, it spans all possible values.
+        Lower = Type.getMinValue();
+        Upper = Type.getMaxValue();
+        break;
+      }
+      break;
+    }
+
+    return true;
+  }
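+
+  // Illustrative example (not part of the upstream file): for a symbol of
+  // type 'unsigned char' (possible values [0, 255]), pinning the int-typed
+  // bounds Lower = -10, Upper = 300 hits the RTR_Below/RTR_Above case above:
+  // the requested range covers every possible value, so it is pinned to
+  // [0, 255] and pin() returns true. Pinning Lower = 300, Upper = 400
+  // (RTR_Above/RTR_Above, conventionally ordered) returns false instead,
+  // because no 'unsigned char' value lies in that range.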
+
+public:
+  // Returns a set containing the values in the receiving set, intersected with
+  // the closed range [Lower, Upper]. Unlike the Range type, this range uses
+  // modular arithmetic, corresponding to the common treatment of C integer
+  // overflow. Thus, if the Lower bound is greater than the Upper bound, the
+  // range is taken to wrap around. This is equivalent to taking the
+  // intersection with the two ranges [Min, Upper] and [Lower, Max],
+  // or, alternatively, /removing/ all integers between Upper and Lower.
+  RangeSet Intersect(BasicValueFactory &BV, Factory &F,
+                     llvm::APSInt Lower, llvm::APSInt Upper) const {
+    if (!pin(Lower, Upper))
+      return F.getEmptySet();
+
+    PrimRangeSet newRanges = F.getEmptySet();
+
+    PrimRangeSet::iterator i = begin(), e = end();
+    if (Lower <= Upper)
+      IntersectInRange(BV, F, Lower, Upper, newRanges, i, e);
+    else {
+      // The order of the next two statements is important!
+      // IntersectInRange() does not reset the iteration state for i and e.
+      // Therefore, the lower range must be handled first.
+      IntersectInRange(BV, F, BV.getMinValue(Upper), Upper, newRanges, i, e);
+      IntersectInRange(BV, F, Lower, BV.getMaxValue(Lower), newRanges, i, e);
+    }
+
+    return newRanges;
+  }
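+
+  // Illustrative example (not part of the upstream file): for an 8-bit
+  // unsigned symbol that is currently unconstrained, i.e. { [0, 255] },
+  // Intersect(BV, F, 250, 5) is a wraparound request. It is handled as the
+  // union of the intersections with [0, 5] and [250, 255], producing
+  // { [0, 5], [250, 255] } -- everything in (5, 250) is removed.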
+
+  void print(raw_ostream &os) const {
+    bool isFirst = true;
+    os << "{ ";
+    for (iterator i = begin(), e = end(); i != e; ++i) {
+      if (isFirst)
+        isFirst = false;
+      else
+        os << ", ";
+
+      os << '[' << i->From().toString(10) << ", " << i->To().toString(10)
+         << ']';
+    }
+    os << " }";
+  }
+
+  bool operator==(const RangeSet &other) const {
+    return ranges == other.ranges;
+  }
+};
+} // end anonymous namespace
+
+REGISTER_TRAIT_WITH_PROGRAMSTATE(ConstraintRange,
+                                 CLANG_ENTO_PROGRAMSTATE_MAP(SymbolRef,
+                                                             RangeSet))
+
+namespace {
+class RangeConstraintManager : public SimpleConstraintManager{
+  RangeSet GetRange(ProgramStateRef state, SymbolRef sym);
+public:
+  RangeConstraintManager(SubEngine *subengine, SValBuilder &SVB)
+    : SimpleConstraintManager(subengine, SVB) {}
+
+  ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym,
+                             const llvm::APSInt& Int,
+                             const llvm::APSInt& Adjustment);
+
+  ProgramStateRef assumeSymEQ(ProgramStateRef state, SymbolRef sym,
+                             const llvm::APSInt& Int,
+                             const llvm::APSInt& Adjustment);
+
+  ProgramStateRef assumeSymLT(ProgramStateRef state, SymbolRef sym,
+                             const llvm::APSInt& Int,
+                             const llvm::APSInt& Adjustment);
+
+  ProgramStateRef assumeSymGT(ProgramStateRef state, SymbolRef sym,
+                             const llvm::APSInt& Int,
+                             const llvm::APSInt& Adjustment);
+
+  ProgramStateRef assumeSymGE(ProgramStateRef state, SymbolRef sym,
+                             const llvm::APSInt& Int,
+                             const llvm::APSInt& Adjustment);
+
+  ProgramStateRef assumeSymLE(ProgramStateRef state, SymbolRef sym,
+                             const llvm::APSInt& Int,
+                             const llvm::APSInt& Adjustment);
+
+  const llvm::APSInt* getSymVal(ProgramStateRef St, SymbolRef sym) const;
+  ConditionTruthVal checkNull(ProgramStateRef State, SymbolRef Sym);
+
+  ProgramStateRef removeDeadBindings(ProgramStateRef St, SymbolReaper& SymReaper);
+
+  void print(ProgramStateRef St, raw_ostream &Out,
+             const char* nl, const char *sep);
+
+private:
+  RangeSet::Factory F;
+};
+
+} // end anonymous namespace
+
+ConstraintManager *
+ento::CreateRangeConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
+  return new RangeConstraintManager(Eng, StMgr.getSValBuilder());
+}
+
+const llvm::APSInt* RangeConstraintManager::getSymVal(ProgramStateRef St,
+                                                      SymbolRef sym) const {
+  const ConstraintRangeTy::data_type *T = St->get<ConstraintRange>(sym);
+  return T ? T->getConcreteValue() : NULL;
+}
+
+ConditionTruthVal RangeConstraintManager::checkNull(ProgramStateRef State,
+                                                    SymbolRef Sym) {
+  const RangeSet *Ranges = State->get<ConstraintRange>(Sym);
+
+  // If we don't have any information about this symbol, it's underconstrained.
+  if (!Ranges)
+    return ConditionTruthVal();
+
+  // If we have a concrete value, see if it's zero.
+  if (const llvm::APSInt *Value = Ranges->getConcreteValue())
+    return *Value == 0;
+
+  BasicValueFactory &BV = getBasicVals();
+  APSIntType IntType = BV.getAPSIntType(Sym->getType());
+  llvm::APSInt Zero = IntType.getZeroValue();
+
+  // Check if zero is in the set of possible values.
+  if (Ranges->Intersect(BV, F, Zero, Zero).isEmpty())
+    return false;
+
+  // Zero is a possible value, but it is not the /only/ possible value.
+  return ConditionTruthVal();
+}
+
+/// Scan all symbols referenced by the constraints. If a symbol is no longer
+/// alive according to the SymbolReaper, remove its constraints.
+ProgramStateRef 
+RangeConstraintManager::removeDeadBindings(ProgramStateRef state,
+                                           SymbolReaper& SymReaper) {
+
+  ConstraintRangeTy CR = state->get<ConstraintRange>();
+  ConstraintRangeTy::Factory& CRFactory = state->get_context<ConstraintRange>();
+
+  for (ConstraintRangeTy::iterator I = CR.begin(), E = CR.end(); I != E; ++I) {
+    SymbolRef sym = I.getKey();
+    if (SymReaper.maybeDead(sym))
+      CR = CRFactory.remove(CR, sym);
+  }
+
+  return state->set<ConstraintRange>(CR);
+}
+
+RangeSet
+RangeConstraintManager::GetRange(ProgramStateRef state, SymbolRef sym) {
+  if (ConstraintRangeTy::data_type* V = state->get<ConstraintRange>(sym))
+    return *V;
+
+  // Lazily generate a new RangeSet representing all possible values for the
+  // given symbol type.
+  BasicValueFactory &BV = getBasicVals();
+  QualType T = sym->getType();
+
+  RangeSet Result(F, BV.getMinValue(T), BV.getMaxValue(T));
+
+  // Special case: references are known to be non-zero.
+  if (T->isReferenceType()) {
+    APSIntType IntType = BV.getAPSIntType(T);
+    Result = Result.Intersect(BV, F, ++IntType.getZeroValue(),
+                                     --IntType.getZeroValue());
+  }
+
+  return Result;
+}
+
+//===----------------------------------------------------------------------===//
+// assumeSymX methods: public interface for RangeConstraintManager.
+//===----------------------------------------------------------------------===//
+
+// The syntax for ranges below is mathematical, using [x, y] for closed ranges
+// and (x, y) for open ranges. These ranges are modular, corresponding with
+// a common treatment of C integer overflow. This means that these methods
+// do not have to worry about overflow; RangeSet::Intersect can handle such a
+// "wraparound" range.
+// As an example, the range [UINT_MAX-1, 3) contains five values: UINT_MAX-1,
+// UINT_MAX, 0, 1, and 2.
+
+ProgramStateRef 
+RangeConstraintManager::assumeSymNE(ProgramStateRef St, SymbolRef Sym,
+                                    const llvm::APSInt &Int,
+                                    const llvm::APSInt &Adjustment) {
+  // Before we do any real work, see if the value can even show up.
+  APSIntType AdjustmentType(Adjustment);
+  if (AdjustmentType.testInRange(Int, true) != APSIntType::RTR_Within)
+    return St;
+
+  llvm::APSInt Lower = AdjustmentType.convert(Int) - Adjustment;
+  llvm::APSInt Upper = Lower;
+  --Lower;
+  ++Upper;
+
+  // [Int-Adjustment+1, Int-Adjustment-1]
+  // Notice that the lower bound is greater than the upper bound.
+  RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Upper, Lower);
+  return New.isEmpty() ? NULL : St->set<ConstraintRange>(Sym, New);
+}
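+
+// Illustrative example (not part of the upstream file): assuming "x != 0" for
+// an unconstrained signed 'int' symbol (Adjustment == 0) intersects the
+// current range with the wraparound range [1, -1], leaving
+// { [INT_MIN, -1], [1, INT_MAX] } -- every value except 0. If the resulting
+// set were empty, the assumption would be infeasible and NULL is returned.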
+
+ProgramStateRef 
+RangeConstraintManager::assumeSymEQ(ProgramStateRef St, SymbolRef Sym,
+                                    const llvm::APSInt &Int,
+                                    const llvm::APSInt &Adjustment) {
+  // Before we do any real work, see if the value can even show up.
+  APSIntType AdjustmentType(Adjustment);
+  if (AdjustmentType.testInRange(Int, true) != APSIntType::RTR_Within)
+    return NULL;
+
+  // [Int-Adjustment, Int-Adjustment]
+  llvm::APSInt AdjInt = AdjustmentType.convert(Int) - Adjustment;
+  RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, AdjInt, AdjInt);
+  return New.isEmpty() ? NULL : St->set<ConstraintRange>(Sym, New);
+}
+
+ProgramStateRef 
+RangeConstraintManager::assumeSymLT(ProgramStateRef St, SymbolRef Sym,
+                                    const llvm::APSInt &Int,
+                                    const llvm::APSInt &Adjustment) {
+  // Before we do any real work, see if the value can even show up.
+  APSIntType AdjustmentType(Adjustment);
+  switch (AdjustmentType.testInRange(Int, true)) {
+  case APSIntType::RTR_Below:
+    return NULL;
+  case APSIntType::RTR_Within:
+    break;
+  case APSIntType::RTR_Above:
+    return St;
+  }
+
+  // Special case for Int == Min. This is always false.
+  llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
+  llvm::APSInt Min = AdjustmentType.getMinValue();
+  if (ComparisonVal == Min)
+    return NULL;
+
+  llvm::APSInt Lower = Min-Adjustment;
+  llvm::APSInt Upper = ComparisonVal-Adjustment;
+  --Upper;
+
+  RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+  return New.isEmpty() ? NULL : St->set<ConstraintRange>(Sym, New);
+}
+
+ProgramStateRef 
+RangeConstraintManager::assumeSymGT(ProgramStateRef St, SymbolRef Sym,
+                                    const llvm::APSInt &Int,
+                                    const llvm::APSInt &Adjustment) {
+  // Before we do any real work, see if the value can even show up.
+  APSIntType AdjustmentType(Adjustment);
+  switch (AdjustmentType.testInRange(Int, true)) {
+  case APSIntType::RTR_Below:
+    return St;
+  case APSIntType::RTR_Within:
+    break;
+  case APSIntType::RTR_Above:
+    return NULL;
+  }
+
+  // Special case for Int == Max. This is always false.
+  llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
+  llvm::APSInt Max = AdjustmentType.getMaxValue();
+  if (ComparisonVal == Max)
+    return NULL;
+
+  llvm::APSInt Lower = ComparisonVal-Adjustment;
+  llvm::APSInt Upper = Max-Adjustment;
+  ++Lower;
+
+  RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+  return New.isEmpty() ? NULL : St->set<ConstraintRange>(Sym, New);
+}
+
+ProgramStateRef 
+RangeConstraintManager::assumeSymGE(ProgramStateRef St, SymbolRef Sym,
+                                    const llvm::APSInt &Int,
+                                    const llvm::APSInt &Adjustment) {
+  // Before we do any real work, see if the value can even show up.
+  APSIntType AdjustmentType(Adjustment);
+  switch (AdjustmentType.testInRange(Int, true)) {
+  case APSIntType::RTR_Below:
+    return St;
+  case APSIntType::RTR_Within:
+    break;
+  case APSIntType::RTR_Above:
+    return NULL;
+  }
+
+  // Special case for Int == Min. This is always feasible.
+  llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
+  llvm::APSInt Min = AdjustmentType.getMinValue();
+  if (ComparisonVal == Min)
+    return St;
+
+  llvm::APSInt Max = AdjustmentType.getMaxValue();
+  llvm::APSInt Lower = ComparisonVal-Adjustment;
+  llvm::APSInt Upper = Max-Adjustment;
+
+  RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+  return New.isEmpty() ? NULL : St->set<ConstraintRange>(Sym, New);
+}
+
+ProgramStateRef 
+RangeConstraintManager::assumeSymLE(ProgramStateRef St, SymbolRef Sym,
+                                    const llvm::APSInt &Int,
+                                    const llvm::APSInt &Adjustment) {
+  // Before we do any real work, see if the value can even show up.
+  APSIntType AdjustmentType(Adjustment);
+  switch (AdjustmentType.testInRange(Int, true)) {
+  case APSIntType::RTR_Below:
+    return NULL;
+  case APSIntType::RTR_Within:
+    break;
+  case APSIntType::RTR_Above:
+    return St;
+  }
+
+  // Special case for Int == Max. This is always feasible.
+  llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
+  llvm::APSInt Max = AdjustmentType.getMaxValue();
+  if (ComparisonVal == Max)
+    return St;
+
+  llvm::APSInt Min = AdjustmentType.getMinValue();
+  llvm::APSInt Lower = Min-Adjustment;
+  llvm::APSInt Upper = ComparisonVal-Adjustment;
+
+  RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+  return New.isEmpty() ? NULL : St->set<ConstraintRange>(Sym, New);
+}
+
+//===----------------------------------------------------------------------===//
+// Pretty-printing.
+//===----------------------------------------------------------------------===//
+
+void RangeConstraintManager::print(ProgramStateRef St, raw_ostream &Out,
+                                   const char* nl, const char *sep) {
+
+  ConstraintRangeTy Ranges = St->get<ConstraintRange>();
+
+  if (Ranges.isEmpty()) {
+    Out << nl << sep << "Ranges are empty." << nl;
+    return;
+  }
+
+  Out << nl << sep << "Ranges of symbol values:";
+  for (ConstraintRangeTy::iterator I=Ranges.begin(), E=Ranges.end(); I!=E; ++I){
+    Out << nl << ' ' << I.getKey() << " : ";
+    I.getData().print(Out);
+  }
+  Out << nl;
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
new file mode 100644
index 0000000..88c4eee
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -0,0 +1,2370 @@
+//== RegionStore.cpp - Field-sensitive store model --------------*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a basic region store model. In this model we have field
+// sensitivity, but we assume nothing about the heap shape, so recursive data
+// structures are largely ignored. Basically we do 1-limiting analysis.
+// Parameter pointers are assumed not to alias. Pointee objects of parameters
+// are created lazily.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/Attr.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Representation of binding keys.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class BindingKey {
+public:
+  enum Kind { Default = 0x0, Direct = 0x1 };
+private:
+  enum { Symbolic = 0x2 };
+
+  llvm::PointerIntPair<const MemRegion *, 2> P;
+  uint64_t Data;
+
+  /// Create a key for a binding to region \p r, which has a symbolic offset
+  /// from region \p Base.
+  explicit BindingKey(const SubRegion *r, const SubRegion *Base, Kind k)
+    : P(r, k | Symbolic), Data(reinterpret_cast<uintptr_t>(Base)) {
+    assert(r && Base && "Must have known regions.");
+    assert(getConcreteOffsetRegion() == Base && "Failed to store base region");
+  }
+
+  /// Create a key for a binding at \p offset from base region \p r.
+  explicit BindingKey(const MemRegion *r, uint64_t offset, Kind k)
+    : P(r, k), Data(offset) {
+    assert(r && "Must have known regions.");
+    assert(getOffset() == offset && "Failed to store offset");
+    assert((r == r->getBaseRegion() || isa<ObjCIvarRegion>(r)) && "Not a base");
+  }
+public:
+
+  bool isDirect() const { return P.getInt() & Direct; }
+  bool hasSymbolicOffset() const { return P.getInt() & Symbolic; }
+
+  const MemRegion *getRegion() const { return P.getPointer(); }
+  uint64_t getOffset() const {
+    assert(!hasSymbolicOffset());
+    return Data;
+  }
+
+  const SubRegion *getConcreteOffsetRegion() const {
+    assert(hasSymbolicOffset());
+    return reinterpret_cast<const SubRegion *>(static_cast<uintptr_t>(Data));
+  }
+
+  const MemRegion *getBaseRegion() const {
+    if (hasSymbolicOffset())
+      return getConcreteOffsetRegion()->getBaseRegion();
+    return getRegion()->getBaseRegion();
+  }
+
+  void Profile(llvm::FoldingSetNodeID& ID) const {
+    ID.AddPointer(P.getOpaqueValue());
+    ID.AddInteger(Data);
+  }
+
+  static BindingKey Make(const MemRegion *R, Kind k);
+
+  bool operator<(const BindingKey &X) const {
+    if (P.getOpaqueValue() < X.P.getOpaqueValue())
+      return true;
+    if (P.getOpaqueValue() > X.P.getOpaqueValue())
+      return false;
+    return Data < X.Data;
+  }
+
+  bool operator==(const BindingKey &X) const {
+    return P.getOpaqueValue() == X.P.getOpaqueValue() &&
+           Data == X.Data;
+  }
+
+  LLVM_ATTRIBUTE_USED void dump() const;
+};
+} // end anonymous namespace
+
+BindingKey BindingKey::Make(const MemRegion *R, Kind k) {
+  const RegionOffset &RO = R->getAsOffset();
+  if (RO.hasSymbolicOffset())
+    return BindingKey(cast<SubRegion>(R), cast<SubRegion>(RO.getRegion()), k);
+
+  return BindingKey(RO.getRegion(), RO.getOffset(), k);
+}
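+
+// Illustrative example (not part of the upstream file): for a binding to the
+// field 'p.x' of a local 'struct Point p', the region offset is concrete, so
+// Make() produces a key over the base VarRegion of 'p' plus that offset. For
+// an element 'buf[i]' indexed by a symbolic 'i', the offset is symbolic, so
+// the key stores the element region itself plus the region its symbolic
+// offset is measured from (the Symbolic flag is set).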
+
+namespace llvm {
+  static inline
+  raw_ostream &operator<<(raw_ostream &os, BindingKey K) {
+    os << '(' << K.getRegion();
+    if (!K.hasSymbolicOffset())
+      os << ',' << K.getOffset();
+    os << ',' << (K.isDirect() ? "direct" : "default")
+       << ')';
+    return os;
+  }
+
+  template <typename T> struct isPodLike;
+  template <> struct isPodLike<BindingKey> {
+    static const bool value = true;
+  };
+} // end llvm namespace
+
+void BindingKey::dump() const {
+  llvm::errs() << *this;
+}
+
+//===----------------------------------------------------------------------===//
+// Actual Store type.
+//===----------------------------------------------------------------------===//
+
+typedef llvm::ImmutableMap<BindingKey, SVal>    ClusterBindings;
+typedef llvm::ImmutableMapRef<BindingKey, SVal> ClusterBindingsRef;
+typedef std::pair<BindingKey, SVal> BindingPair;
+
+typedef llvm::ImmutableMap<const MemRegion *, ClusterBindings>
+        RegionBindings;
+
+namespace {
+class RegionBindingsRef : public llvm::ImmutableMapRef<const MemRegion *,
+                                 ClusterBindings> {
+ ClusterBindings::Factory &CBFactory;
+public:
+  typedef llvm::ImmutableMapRef<const MemRegion *, ClusterBindings>
+          ParentTy;
+
+  RegionBindingsRef(ClusterBindings::Factory &CBFactory,
+                    const RegionBindings::TreeTy *T,
+                    RegionBindings::TreeTy::Factory *F)
+    : llvm::ImmutableMapRef<const MemRegion *, ClusterBindings>(T, F),
+      CBFactory(CBFactory) {}
+
+  RegionBindingsRef(const ParentTy &P, ClusterBindings::Factory &CBFactory)
+    : llvm::ImmutableMapRef<const MemRegion *, ClusterBindings>(P),
+      CBFactory(CBFactory) {}
+
+  RegionBindingsRef add(key_type_ref K, data_type_ref D) const {
+    return RegionBindingsRef(static_cast<const ParentTy*>(this)->add(K, D),
+                             CBFactory);
+  }
+
+  RegionBindingsRef remove(key_type_ref K) const {
+    return RegionBindingsRef(static_cast<const ParentTy*>(this)->remove(K),
+                             CBFactory);
+  }
+
+  RegionBindingsRef addBinding(BindingKey K, SVal V) const;
+
+  RegionBindingsRef addBinding(const MemRegion *R,
+                               BindingKey::Kind k, SVal V) const;
+
+  RegionBindingsRef &operator=(const RegionBindingsRef &X) {
+    *static_cast<ParentTy*>(this) = X;
+    return *this;
+  }
+
+  const SVal *lookup(BindingKey K) const;
+  const SVal *lookup(const MemRegion *R, BindingKey::Kind k) const;
+  const ClusterBindings *lookup(const MemRegion *R) const {
+    return static_cast<const ParentTy*>(this)->lookup(R);
+  }
+
+  RegionBindingsRef removeBinding(BindingKey K);
+
+  RegionBindingsRef removeBinding(const MemRegion *R,
+                                  BindingKey::Kind k);
+
+  RegionBindingsRef removeBinding(const MemRegion *R) {
+    return removeBinding(R, BindingKey::Direct).
+           removeBinding(R, BindingKey::Default);
+  }
+
+  Optional<SVal> getDirectBinding(const MemRegion *R) const;
+
+  /// getDefaultBinding - Returns the optional default binding associated
+  ///  with a region and its subregions.
+  Optional<SVal> getDefaultBinding(const MemRegion *R) const;
+
+  /// Return the internal tree as a Store.
+  Store asStore() const {
+    return asImmutableMap().getRootWithoutRetain();
+  }
+
+  void dump(raw_ostream &OS, const char *nl) const {
+   for (iterator I = begin(), E = end(); I != E; ++I) {
+     const ClusterBindings &Cluster = I.getData();
+     for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
+          CI != CE; ++CI) {
+       OS << ' ' << CI.getKey() << " : " << CI.getData() << nl;
+     }
+     OS << nl;
+   }
+  }
+
+  LLVM_ATTRIBUTE_USED void dump() const {
+    dump(llvm::errs(), "\n");
+  }
+};
+} // end anonymous namespace
+
+typedef const RegionBindingsRef& RegionBindingsConstRef;
+
+Optional<SVal> RegionBindingsRef::getDirectBinding(const MemRegion *R) const {
+  return Optional<SVal>::create(lookup(R, BindingKey::Direct));
+}
+
+Optional<SVal> RegionBindingsRef::getDefaultBinding(const MemRegion *R) const {
+  if (R->isBoundable())
+    if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R))
+      if (TR->getValueType()->isUnionType())
+        return UnknownVal();
+
+  return Optional<SVal>::create(lookup(R, BindingKey::Default));
+}
+
+RegionBindingsRef RegionBindingsRef::addBinding(BindingKey K, SVal V) const {
+  const MemRegion *Base = K.getBaseRegion();
+
+  const ClusterBindings *ExistingCluster = lookup(Base);
+  ClusterBindings Cluster = (ExistingCluster ? *ExistingCluster
+                             : CBFactory.getEmptyMap());
+
+  ClusterBindings NewCluster = CBFactory.add(Cluster, K, V);
+  return add(Base, NewCluster);
+}
+
+
+RegionBindingsRef RegionBindingsRef::addBinding(const MemRegion *R,
+                                                BindingKey::Kind k,
+                                                SVal V) const {
+  return addBinding(BindingKey::Make(R, k), V);
+}
+
+const SVal *RegionBindingsRef::lookup(BindingKey K) const {
+  const ClusterBindings *Cluster = lookup(K.getBaseRegion());
+  if (!Cluster)
+    return 0;
+  return Cluster->lookup(K);
+}
+
+const SVal *RegionBindingsRef::lookup(const MemRegion *R,
+                                      BindingKey::Kind k) const {
+  return lookup(BindingKey::Make(R, k));
+}
+
+RegionBindingsRef RegionBindingsRef::removeBinding(BindingKey K) {
+  const MemRegion *Base = K.getBaseRegion();
+  const ClusterBindings *Cluster = lookup(Base);
+  if (!Cluster)
+    return *this;
+
+  ClusterBindings NewCluster = CBFactory.remove(*Cluster, K);
+  if (NewCluster.isEmpty())
+    return remove(Base);
+  return add(Base, NewCluster);
+}
+
+RegionBindingsRef RegionBindingsRef::removeBinding(const MemRegion *R,
+                                                BindingKey::Kind k){
+  return removeBinding(BindingKey::Make(R, k));
+}
+
+//===----------------------------------------------------------------------===//
+// Fine-grained control of RegionStoreManager.
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct minimal_features_tag {};
+struct maximal_features_tag {};
+
+class RegionStoreFeatures {
+  bool SupportsFields;
+public:
+  RegionStoreFeatures(minimal_features_tag) :
+    SupportsFields(false) {}
+
+  RegionStoreFeatures(maximal_features_tag) :
+    SupportsFields(true) {}
+
+  void enableFields(bool t) { SupportsFields = t; }
+
+  bool supportsFields() const { return SupportsFields; }
+};
+}
+
+//===----------------------------------------------------------------------===//
+// Main RegionStore logic.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class invalidateRegionsWorker;
+
+class RegionStoreManager : public StoreManager {
+public:
+  const RegionStoreFeatures Features;
+
+  RegionBindings::Factory RBFactory;
+  mutable ClusterBindings::Factory CBFactory;
+
+  typedef std::vector<SVal> SValListTy;
+private:
+  typedef llvm::DenseMap<const LazyCompoundValData *,
+                         SValListTy> LazyBindingsMapTy;
+  LazyBindingsMapTy LazyBindingsMap;
+
+  /// The largest number of fields a struct can have and still be
+  /// considered "small".
+  ///
+  /// This is currently used to decide whether or not it is worth "forcing" a
+  /// LazyCompoundVal on bind.
+  ///
+  /// This is controlled by 'region-store-small-struct-limit' option.
+  /// To disable all small-struct-dependent behavior, set the option to "0".
+  unsigned SmallStructLimit;
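+
+  // A minimal sketch (not part of the upstream file) of overriding the limit
+  // from the command line; the constructor below reads the value through
+  // getOptionAsInteger("region-store-small-struct-limit", 2):
+  //
+  //   clang --analyze -Xclang -analyzer-config \
+  //         -Xclang region-store-small-struct-limit=0 file.c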
+
+  /// \brief A helper used to populate the work list with the given set of
+  /// regions.
+  void populateWorkList(invalidateRegionsWorker &W,
+                        ArrayRef<SVal> Values,
+                        bool IsArrayOfConstRegions,
+                        InvalidatedRegions *TopLevelRegions);
+
+public:
+  RegionStoreManager(ProgramStateManager& mgr, const RegionStoreFeatures &f)
+    : StoreManager(mgr), Features(f),
+      RBFactory(mgr.getAllocator()), CBFactory(mgr.getAllocator()),
+      SmallStructLimit(0) {
+    if (SubEngine *Eng = StateMgr.getOwningEngine()) {
+      AnalyzerOptions &Options = Eng->getAnalysisManager().options;
+      SmallStructLimit =
+        Options.getOptionAsInteger("region-store-small-struct-limit", 2);
+    }
+  }
+
+
+  /// setImplicitDefaultValue - Set the default binding for the provided
+  ///  MemRegion to the value implicitly defined for compound literals when
+  ///  the value is not specified.
+  RegionBindingsRef setImplicitDefaultValue(RegionBindingsConstRef B,
+                                            const MemRegion *R, QualType T);
+
+  /// ArrayToPointer - Emulates the "decay" of an array to a pointer
+  ///  type.  'Array' represents the lvalue of the array being decayed
+  ///  to a pointer, and the returned SVal represents the decayed
+  ///  version of that lvalue (i.e., a pointer to the first element of
+  ///  the array).  This is called by ExprEngine when evaluating
+  ///  casts from arrays to pointers.
+  SVal ArrayToPointer(Loc Array);
+
+  StoreRef getInitialStore(const LocationContext *InitLoc) {
+    return StoreRef(RBFactory.getEmptyMap().getRootWithoutRetain(), *this);
+  }
+
+  //===-------------------------------------------------------------------===//
+  // Binding values to regions.
+  //===-------------------------------------------------------------------===//
+  RegionBindingsRef invalidateGlobalRegion(MemRegion::Kind K,
+                                           const Expr *Ex,
+                                           unsigned Count,
+                                           const LocationContext *LCtx,
+                                           RegionBindingsRef B,
+                                           InvalidatedRegions *Invalidated);
+
+  StoreRef invalidateRegions(Store store,
+                             ArrayRef<SVal> Values,
+                             ArrayRef<SVal> ConstValues,
+                             const Expr *E, unsigned Count,
+                             const LocationContext *LCtx,
+                             const CallEvent *Call,
+                             InvalidatedSymbols &IS,
+                             InvalidatedSymbols &ConstIS,
+                             InvalidatedRegions *Invalidated,
+                             InvalidatedRegions *InvalidatedTopLevel,
+                             InvalidatedRegions *InvalidatedTopLevelConst);
+
+  bool scanReachableSymbols(Store S, const MemRegion *R,
+                            ScanReachableSymbols &Callbacks);
+
+  RegionBindingsRef removeSubRegionBindings(RegionBindingsConstRef B,
+                                            const SubRegion *R);
+
+public: // Part of public interface to class.
+
+  virtual StoreRef Bind(Store store, Loc LV, SVal V) {
+    return StoreRef(bind(getRegionBindings(store), LV, V).asStore(), *this);
+  }
+
+  RegionBindingsRef bind(RegionBindingsConstRef B, Loc LV, SVal V);
+
+  // BindDefault is only used to initialize a region with a default value.
+  StoreRef BindDefault(Store store, const MemRegion *R, SVal V) {
+    RegionBindingsRef B = getRegionBindings(store);
+    assert(!B.lookup(R, BindingKey::Default));
+    assert(!B.lookup(R, BindingKey::Direct));
+    return StoreRef(B.addBinding(R, BindingKey::Default, V)
+                     .asImmutableMap()
+                     .getRootWithoutRetain(), *this);
+  }
+
+  /// Attempt to extract the fields of \p LCV and bind them to the struct region
+  /// \p R.
+  ///
+  /// This path is used when it seems advantageous to "force" loading the values
+  /// within a LazyCompoundVal to bind memberwise to the struct region, rather
+  /// than using a Default binding at the base of the entire region. This is a
+  /// heuristic attempting to avoid building long chains of LazyCompoundVals.
+  ///
+  /// \returns The updated store bindings, or \c None if binding non-lazily
+  ///          would be too expensive.
+  Optional<RegionBindingsRef> tryBindSmallStruct(RegionBindingsConstRef B,
+                                                 const TypedValueRegion *R,
+                                                 const RecordDecl *RD,
+                                                 nonloc::LazyCompoundVal LCV);
+
+  /// BindStruct - Bind a compound value to a structure.
+  RegionBindingsRef bindStruct(RegionBindingsConstRef B,
+                               const TypedValueRegion* R, SVal V);
+
+  /// BindVector - Bind a compound value to a vector.
+  RegionBindingsRef bindVector(RegionBindingsConstRef B,
+                               const TypedValueRegion* R, SVal V);
+
+  RegionBindingsRef bindArray(RegionBindingsConstRef B,
+                              const TypedValueRegion* R,
+                              SVal V);
+
+  /// Clears out all bindings in the given region and assigns a new value
+  /// as a Default binding.
+  RegionBindingsRef bindAggregate(RegionBindingsConstRef B,
+                                  const TypedRegion *R,
+                                  SVal DefaultVal);
+
+  /// \brief Create a new store with the specified binding removed.
+  /// \param ST the original store, that is the basis for the new store.
+  /// \param L the location whose binding should be removed.
+  virtual StoreRef killBinding(Store ST, Loc L);
+
+  void incrementReferenceCount(Store store) {
+    getRegionBindings(store).manualRetain();    
+  }
+  
+  /// If the StoreManager supports it, decrement the reference count of
+  /// the specified Store object.  If the reference count hits 0, the memory
+  /// associated with the object is recycled.
+  void decrementReferenceCount(Store store) {
+    getRegionBindings(store).manualRelease();
+  }
+  
+  bool includedInBindings(Store store, const MemRegion *region) const;
+
+  /// \brief Return the value bound to the specified location in a given state.
+  ///
+  /// The high level logic for this method is this:
+  /// getBinding (L)
+  ///   if L has binding
+  ///     return L's binding
+  ///   else if L is in killset
+  ///     return unknown
+  ///   else
+  ///     if L is on stack or heap
+  ///       return undefined
+  ///     else
+  ///       return symbolic
+  virtual SVal getBinding(Store S, Loc L, QualType T) {
+    return getBinding(getRegionBindings(S), L, T);
+  }
+
+  SVal getBinding(RegionBindingsConstRef B, Loc L, QualType T = QualType());
+
+  SVal getBindingForElement(RegionBindingsConstRef B, const ElementRegion *R);
+
+  SVal getBindingForField(RegionBindingsConstRef B, const FieldRegion *R);
+
+  SVal getBindingForObjCIvar(RegionBindingsConstRef B, const ObjCIvarRegion *R);
+
+  SVal getBindingForVar(RegionBindingsConstRef B, const VarRegion *R);
+
+  SVal getBindingForLazySymbol(const TypedValueRegion *R);
+
+  SVal getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
+                                         const TypedValueRegion *R,
+                                         QualType Ty);
+  
+  SVal getLazyBinding(const SubRegion *LazyBindingRegion,
+                      RegionBindingsRef LazyBinding);
+
+  /// Get bindings for the values in a struct and return a CompoundVal, used
+  /// when doing struct copy:
+  /// struct s x, y;
+  /// x = y;
+  /// y's value is retrieved by this method.
+  SVal getBindingForStruct(RegionBindingsConstRef B, const TypedValueRegion *R);
+  SVal getBindingForArray(RegionBindingsConstRef B, const TypedValueRegion *R);
+  NonLoc createLazyBinding(RegionBindingsConstRef B, const TypedValueRegion *R);
+
+  /// Used to lazily generate derived symbols for bindings that are defined
+  /// implicitly by default bindings in a super region.
+  ///
+  /// Note that callers may need to specially handle LazyCompoundVals, which
+  /// are returned as is in case the caller needs to treat them differently.
+  Optional<SVal> getBindingForDerivedDefaultValue(RegionBindingsConstRef B,
+                                                  const MemRegion *superR,
+                                                  const TypedValueRegion *R,
+                                                  QualType Ty);
+
+  /// Get the state and region whose binding this region \p R corresponds to.
+  ///
+  /// If there is no lazy binding for \p R, the returned value will have a null
+  /// \c second. Note that a null pointer can represent a valid Store.
+  std::pair<Store, const SubRegion *>
+  findLazyBinding(RegionBindingsConstRef B, const SubRegion *R,
+                  const SubRegion *originalRegion);
+
+  /// Returns the cached set of interesting SVals contained within a lazy
+  /// binding.
+  ///
+  /// The precise value of "interesting" is determined for the purposes of
+  /// RegionStore's internal analysis. It must always contain all regions and
+  /// symbols, but may omit constants and other kinds of SVal.
+  const SValListTy &getInterestingValues(nonloc::LazyCompoundVal LCV);
+
+  //===------------------------------------------------------------------===//
+  // State pruning.
+  //===------------------------------------------------------------------===//
+
+  /// removeDeadBindings - Scans the RegionStore of 'state' for dead values.
+  ///  It returns a new Store with these values removed.
+  StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
+                              SymbolReaper& SymReaper);
+  
+  //===------------------------------------------------------------------===//
+  // Region "extents".
+  //===------------------------------------------------------------------===//
+
+  // FIXME: This method will soon be eliminated; see the note in Store.h.
+  DefinedOrUnknownSVal getSizeInElements(ProgramStateRef state,
+                                         const MemRegion* R, QualType EleTy);
+
+  //===------------------------------------------------------------------===//
+  // Utility methods.
+  //===------------------------------------------------------------------===//
+
+  RegionBindingsRef getRegionBindings(Store store) const {
+    return RegionBindingsRef(CBFactory,
+                             static_cast<const RegionBindings::TreeTy*>(store),
+                             RBFactory.getTreeFactory());
+  }
+
+  void print(Store store, raw_ostream &Out, const char* nl,
+             const char *sep);
+
+  void iterBindings(Store store, BindingsHandler& f) {
+    RegionBindingsRef B = getRegionBindings(store);
+    for (RegionBindingsRef::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+      const ClusterBindings &Cluster = I.getData();
+      for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
+           CI != CE; ++CI) {
+        const BindingKey &K = CI.getKey();
+        if (!K.isDirect())
+          continue;
+        if (const SubRegion *R = dyn_cast<SubRegion>(K.getRegion())) {
+          // FIXME: Possibly incorporate the offset?
+          if (!f.HandleBinding(*this, store, R, CI.getData()))
+            return;
+        }
+      }
+    }
+  }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// RegionStore creation.
+//===----------------------------------------------------------------------===//
+
+StoreManager *ento::CreateRegionStoreManager(ProgramStateManager& StMgr) {
+  RegionStoreFeatures F = maximal_features_tag();
+  return new RegionStoreManager(StMgr, F);
+}
+
+StoreManager *
+ento::CreateFieldsOnlyRegionStoreManager(ProgramStateManager &StMgr) {
+  RegionStoreFeatures F = minimal_features_tag();
+  F.enableFields(true);
+  return new RegionStoreManager(StMgr, F);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Region Cluster analysis.
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// Used to determine which global regions are automatically included in the
+/// initial worklist of a ClusterAnalysis.
+enum GlobalsFilterKind {
+  /// Don't include any global regions.
+  GFK_None,
+  /// Only include system globals.
+  GFK_SystemOnly,
+  /// Include all global regions.
+  GFK_All
+};
+
+template <typename DERIVED>
+class ClusterAnalysis  {
+protected:
+  typedef llvm::DenseMap<const MemRegion *, const ClusterBindings *> ClusterMap;
+  typedef llvm::PointerIntPair<const MemRegion *, 1, bool> WorkListElement;
+  typedef SmallVector<WorkListElement, 10> WorkList;
+
+  llvm::SmallPtrSet<const ClusterBindings *, 16> Visited;
+
+  WorkList WL;
+
+  RegionStoreManager &RM;
+  ASTContext &Ctx;
+  SValBuilder &svalBuilder;
+
+  RegionBindingsRef B;
+
+private:
+  GlobalsFilterKind GlobalsFilter;
+
+protected:
+  const ClusterBindings *getCluster(const MemRegion *R) {
+    return B.lookup(R);
+  }
+
+  /// Returns true if the memory space of the given region is one of the global
+  /// regions specially included at the start of analysis.
+  bool isInitiallyIncludedGlobalRegion(const MemRegion *R) {
+    switch (GlobalsFilter) {
+    case GFK_None:
+      return false;
+    case GFK_SystemOnly:
+      return isa<GlobalSystemSpaceRegion>(R->getMemorySpace());
+    case GFK_All:
+      return isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace());
+    }
+
+    llvm_unreachable("unknown globals filter");
+  }
+
+public:
+  ClusterAnalysis(RegionStoreManager &rm, ProgramStateManager &StateMgr,
+                  RegionBindingsRef b, GlobalsFilterKind GFK)
+    : RM(rm), Ctx(StateMgr.getContext()),
+      svalBuilder(StateMgr.getSValBuilder()),
+      B(b), GlobalsFilter(GFK) {}
+
+  RegionBindingsRef getRegionBindings() const { return B; }
+
+  bool isVisited(const MemRegion *R) {
+    return Visited.count(getCluster(R));
+  }
+
+  void GenerateClusters() {
+    // Scan the entire set of bindings and record the region clusters.
+    for (RegionBindingsRef::iterator RI = B.begin(), RE = B.end();
+         RI != RE; ++RI) {
+      const MemRegion *Base = RI.getKey();
+
+      const ClusterBindings &Cluster = RI.getData();
+      assert(!Cluster.isEmpty() && "Empty clusters should be removed");
+      static_cast<DERIVED*>(this)->VisitAddedToCluster(Base, Cluster);
+
+      // If this is an interesting global region, add it to the work list
+      // up front.
+      if (isInitiallyIncludedGlobalRegion(Base))
+        AddToWorkList(WorkListElement(Base), &Cluster);
+    }
+  }
+
+  bool AddToWorkList(WorkListElement E, const ClusterBindings *C) {
+    if (C && !Visited.insert(C))
+      return false;
+    WL.push_back(E);
+    return true;
+  }
+
+  bool AddToWorkList(const MemRegion *R, bool Flag = false) {
+    const MemRegion *BaseR = R->getBaseRegion();
+    return AddToWorkList(WorkListElement(BaseR, Flag), getCluster(BaseR));
+  }
+
+  void RunWorkList() {
+    while (!WL.empty()) {
+      WorkListElement E = WL.pop_back_val();
+      const MemRegion *BaseR = E.getPointer();
+
+      static_cast<DERIVED*>(this)->VisitCluster(BaseR, getCluster(BaseR),
+                                                E.getInt());
+    }
+  }
+
+  void VisitAddedToCluster(const MemRegion *baseR, const ClusterBindings &C) {}
+  void VisitCluster(const MemRegion *baseR, const ClusterBindings *C) {}
+
+  void VisitCluster(const MemRegion *BaseR, const ClusterBindings *C,
+                    bool Flag) {
+    static_cast<DERIVED*>(this)->VisitCluster(BaseR, C);
+  }
+};
+}
+
+//===----------------------------------------------------------------------===//
+// Binding invalidation.
+//===----------------------------------------------------------------------===//
+
+bool RegionStoreManager::scanReachableSymbols(Store S, const MemRegion *R,
+                                              ScanReachableSymbols &Callbacks) {
+  assert(R == R->getBaseRegion() && "Should only be called for base regions");
+  RegionBindingsRef B = getRegionBindings(S);
+  const ClusterBindings *Cluster = B.lookup(R);
+
+  if (!Cluster)
+    return true;
+
+  for (ClusterBindings::iterator RI = Cluster->begin(), RE = Cluster->end();
+       RI != RE; ++RI) {
+    if (!Callbacks.scan(RI.getData()))
+      return false;
+  }
+
+  return true;
+}
+
+static inline bool isUnionField(const FieldRegion *FR) {
+  return FR->getDecl()->getParent()->isUnion();
+}
+
+typedef SmallVector<const FieldDecl *, 8> FieldVector;
+
+void getSymbolicOffsetFields(BindingKey K, FieldVector &Fields) {
+  assert(K.hasSymbolicOffset() && "Not implemented for concrete offset keys");
+
+  const MemRegion *Base = K.getConcreteOffsetRegion();
+  const MemRegion *R = K.getRegion();
+
+  while (R != Base) {
+    if (const FieldRegion *FR = dyn_cast<FieldRegion>(R))
+      if (!isUnionField(FR))
+        Fields.push_back(FR->getDecl());
+
+    R = cast<SubRegion>(R)->getSuperRegion();
+  }
+}
+
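+// Illustrative example (hypothetical regions): when invalidating a region
+// whose symbolic field path is {x} (e.g. 's.arr[i].x'), a binding key for
+// 's.arr[j].x.y' has the field path {y, x} and is treated as compatible
+// (the two may overlap), while a key for 's.arr[j].z' has the path {z} and
+// is rejected, since the 'x' and 'z' fields cannot alias.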
+static bool isCompatibleWithFields(BindingKey K, const FieldVector &Fields) {
+  assert(K.hasSymbolicOffset() && "Not implemented for concrete offset keys");
+
+  if (Fields.empty())
+    return true;
+
+  FieldVector FieldsInBindingKey;
+  getSymbolicOffsetFields(K, FieldsInBindingKey);
+
+  ptrdiff_t Delta = FieldsInBindingKey.size() - Fields.size();
+  if (Delta >= 0)
+    return std::equal(FieldsInBindingKey.begin() + Delta,
+                      FieldsInBindingKey.end(),
+                      Fields.begin());
+  else
+    return std::equal(FieldsInBindingKey.begin(), FieldsInBindingKey.end(),
+                      Fields.begin() - Delta);
+}
+
+/// Collects all bindings in \p Cluster that may refer to bindings within
+/// \p Top.
+///
+/// Each binding is a pair whose \c first is the key (a BindingKey) and whose
+/// \c second is the value (an SVal).
+///
+/// The \p IncludeAllDefaultBindings parameter specifies whether to include
+/// default bindings that may extend beyond \p Top itself, e.g. if \p Top is
+/// an aggregate within a larger aggregate with a default binding.
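+///
+/// Illustrative example (hypothetical regions): if \p Top is the field
+/// region 'obj.inner' and the cluster contains direct bindings for
+/// 'obj.inner.x' and 'obj.other', only the binding for 'obj.inner.x' is
+/// collected.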
+static void
+collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
+                         SValBuilder &SVB, const ClusterBindings &Cluster,
+                         const SubRegion *Top, BindingKey TopKey,
+                         bool IncludeAllDefaultBindings) {
+  FieldVector FieldsInSymbolicSubregions;
+  if (TopKey.hasSymbolicOffset()) {
+    getSymbolicOffsetFields(TopKey, FieldsInSymbolicSubregions);
+    Top = cast<SubRegion>(TopKey.getConcreteOffsetRegion());
+    TopKey = BindingKey::Make(Top, BindingKey::Default);
+  }
+
+  // Find the length (in bits) of the region being invalidated.
+  uint64_t Length = UINT64_MAX;
+  SVal Extent = Top->getExtent(SVB);
+  if (Optional<nonloc::ConcreteInt> ExtentCI =
+          Extent.getAs<nonloc::ConcreteInt>()) {
+    const llvm::APSInt &ExtentInt = ExtentCI->getValue();
+    assert(ExtentInt.isNonNegative() || ExtentInt.isUnsigned());
+    // Extents are in bytes but region offsets are in bits. Be careful!
+    Length = ExtentInt.getLimitedValue() * SVB.getContext().getCharWidth();
+  } else if (const FieldRegion *FR = dyn_cast<FieldRegion>(Top)) {
+    if (FR->getDecl()->isBitField())
+      Length = FR->getDecl()->getBitWidthValue(SVB.getContext());
+  }
+
+  for (ClusterBindings::iterator I = Cluster.begin(), E = Cluster.end();
+       I != E; ++I) {
+    BindingKey NextKey = I.getKey();
+    if (NextKey.getRegion() == TopKey.getRegion()) {
+      // FIXME: This doesn't catch the case where we're really invalidating a
+      // region with a symbolic offset. Example:
+      //      R: points[i].y
+      //   Next: points[0].x
+
+      if (NextKey.getOffset() > TopKey.getOffset() &&
+          NextKey.getOffset() - TopKey.getOffset() < Length) {
+        // Case 1: The next binding is inside the region we're invalidating.
+        // Include it.
+        Bindings.push_back(*I);
+
+      } else if (NextKey.getOffset() == TopKey.getOffset()) {
+        // Case 2: The next binding is at the same offset as the region we're
+        // invalidating. In this case, we need to leave default bindings alone,
+        // since they may be providing a default value for regions beyond what
+        // we're invalidating.
+        // FIXME: This is probably incorrect; consider invalidating an outer
+        // struct whose first field is bound to a LazyCompoundVal.
+        if (IncludeAllDefaultBindings || NextKey.isDirect())
+          Bindings.push_back(*I);
+      }
+
+    } else if (NextKey.hasSymbolicOffset()) {
+      const MemRegion *Base = NextKey.getConcreteOffsetRegion();
+      if (Top->isSubRegionOf(Base)) {
+        // Case 3: The next key is symbolic and we just changed something within
+        // its concrete region. We don't know if the binding is still valid, so
+        // we'll be conservative and include it.
+        if (IncludeAllDefaultBindings || NextKey.isDirect())
+          if (isCompatibleWithFields(NextKey, FieldsInSymbolicSubregions))
+            Bindings.push_back(*I);
+      } else if (const SubRegion *BaseSR = dyn_cast<SubRegion>(Base)) {
+        // Case 4: The next key is symbolic, but we changed a known
+        // super-region. In this case the binding is certainly included.
+        if (Top == Base || BaseSR->isSubRegionOf(Top))
+          if (isCompatibleWithFields(NextKey, FieldsInSymbolicSubregions))
+            Bindings.push_back(*I);
+      }
+    }
+  }
+}
+
+static void
+collectSubRegionBindings(SmallVectorImpl<BindingPair> &Bindings,
+                         SValBuilder &SVB, const ClusterBindings &Cluster,
+                         const SubRegion *Top, bool IncludeAllDefaultBindings) {
+  collectSubRegionBindings(Bindings, SVB, Cluster, Top,
+                           BindingKey::Make(Top, BindingKey::Default),
+                           IncludeAllDefaultBindings);
+}
+
+RegionBindingsRef
+RegionStoreManager::removeSubRegionBindings(RegionBindingsConstRef B,
+                                            const SubRegion *Top) {
+  BindingKey TopKey = BindingKey::Make(Top, BindingKey::Default);
+  const MemRegion *ClusterHead = TopKey.getBaseRegion();
+
+  if (Top == ClusterHead) {
+    // We can remove an entire cluster's bindings all in one go.
+    return B.remove(Top);
+  }
+
+  const ClusterBindings *Cluster = B.lookup(ClusterHead);
+  if (!Cluster) {
+    // If we're invalidating a region with a symbolic offset, we need to make
+    // sure we don't treat the base region as uninitialized anymore.
+    if (TopKey.hasSymbolicOffset()) {
+      const SubRegion *Concrete = TopKey.getConcreteOffsetRegion();
+      return B.addBinding(Concrete, BindingKey::Default, UnknownVal());
+    }
+    return B;
+  }
+
+  SmallVector<BindingPair, 32> Bindings;
+  collectSubRegionBindings(Bindings, svalBuilder, *Cluster, Top, TopKey,
+                           /*IncludeAllDefaultBindings=*/false);
+
+  ClusterBindingsRef Result(*Cluster, CBFactory);
+  for (SmallVectorImpl<BindingPair>::const_iterator I = Bindings.begin(),
+                                                    E = Bindings.end();
+       I != E; ++I)
+    Result = Result.remove(I->first);
+
+  // If we're invalidating a region with a symbolic offset, we need to make sure
+  // we don't treat the base region as uninitialized anymore.
+  // FIXME: This isn't very precise; see the example in
+  // collectSubRegionBindings.
+  if (TopKey.hasSymbolicOffset()) {
+    const SubRegion *Concrete = TopKey.getConcreteOffsetRegion();
+    Result = Result.add(BindingKey::Make(Concrete, BindingKey::Default),
+                        UnknownVal());
+  }
+
+  if (Result.isEmpty())
+    return B.remove(ClusterHead);
+  return B.add(ClusterHead, Result.asImmutableMap());
+}
+
+namespace {
+class invalidateRegionsWorker : public ClusterAnalysis<invalidateRegionsWorker>
+{
+  const Expr *Ex;
+  unsigned Count;
+  const LocationContext *LCtx;
+  InvalidatedSymbols &IS;
+  InvalidatedSymbols &ConstIS;
+  StoreManager::InvalidatedRegions *Regions;
+public:
+  invalidateRegionsWorker(RegionStoreManager &rm,
+                          ProgramStateManager &stateMgr,
+                          RegionBindingsRef b,
+                          const Expr *ex, unsigned count,
+                          const LocationContext *lctx,
+                          InvalidatedSymbols &is,
+                          InvalidatedSymbols &inConstIS,
+                          StoreManager::InvalidatedRegions *r,
+                          GlobalsFilterKind GFK)
+    : ClusterAnalysis<invalidateRegionsWorker>(rm, stateMgr, b, GFK),
+      Ex(ex), Count(count), LCtx(lctx), IS(is), ConstIS(inConstIS), Regions(r){}
+
+  /// \param IsConst Specifies if the region we are invalidating is constant.
+  /// If it is, we invalidate all subregions, but not the base region itself.
+  void VisitCluster(const MemRegion *baseR, const ClusterBindings *C,
+                    bool IsConst);
+  void VisitBinding(SVal V);
+};
+}
+
+void invalidateRegionsWorker::VisitBinding(SVal V) {
+  // A symbol?  Mark it touched by the invalidation.
+  if (SymbolRef Sym = V.getAsSymbol())
+    IS.insert(Sym);
+
+  if (const MemRegion *R = V.getAsRegion()) {
+    AddToWorkList(R);
+    return;
+  }
+
+  // Is it a LazyCompoundVal?  All references get invalidated as well.
+  if (Optional<nonloc::LazyCompoundVal> LCS =
+          V.getAs<nonloc::LazyCompoundVal>()) {
+
+    const RegionStoreManager::SValListTy &Vals = RM.getInterestingValues(*LCS);
+
+    for (RegionStoreManager::SValListTy::const_iterator I = Vals.begin(),
+                                                        E = Vals.end();
+         I != E; ++I)
+      VisitBinding(*I);
+
+    return;
+  }
+}
+
+void invalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
+                                           const ClusterBindings *C,
+                                           bool IsConst) {
+  if (C) {
+    for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E; ++I)
+      VisitBinding(I.getData());
+
+    // Invalidate the contents of a non-const base region.
+    if (!IsConst)
+      B = B.remove(baseR);
+  }
+
+  // BlockDataRegion?  If so, invalidate captured variables that are passed
+  // by reference.
+  if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(baseR)) {
+    for (BlockDataRegion::referenced_vars_iterator
+         BI = BR->referenced_vars_begin(), BE = BR->referenced_vars_end() ;
+         BI != BE; ++BI) {
+      const VarRegion *VR = BI.getCapturedRegion();
+      const VarDecl *VD = VR->getDecl();
+      if (VD->getAttr<BlocksAttr>() || !VD->hasLocalStorage()) {
+        AddToWorkList(VR);
+      }
+      else if (Loc::isLocType(VR->getValueType())) {
+        // Map the current bindings to a Store to retrieve the value
+        // of the binding.  If that binding itself is a region, we should
+        // invalidate that region.  This is because a block may capture
+        // a pointer value, but the thing pointed to by that pointer may
+        // get invalidated.
+        SVal V = RM.getBinding(B, loc::MemRegionVal(VR));
+        if (Optional<Loc> L = V.getAs<Loc>()) {
+          if (const MemRegion *LR = L->getAsRegion())
+            AddToWorkList(LR);
+        }
+      }
+    }
+    return;
+  }
+
+  // Symbolic region?
+  if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR)) {
+    SymbolRef RegionSym = SR->getSymbol();
+
+    // Mark that symbol touched by the invalidation.
+    if (IsConst)
+      ConstIS.insert(RegionSym);
+    else
+      IS.insert(RegionSym);
+  }
+
+  // Nothing else should be done for a const region.
+  if (IsConst)
+    return;
+
+  // Otherwise, we have a normal data region. Record that we touched the region.
+  if (Regions)
+    Regions->push_back(baseR);
+
+  if (isa<AllocaRegion>(baseR) || isa<SymbolicRegion>(baseR)) {
+    // Invalidate the region by setting its default value to a
+    // conjured symbol. The type of the symbol is irrelevant.
+    DefinedOrUnknownSVal V =
+      svalBuilder.conjureSymbolVal(baseR, Ex, LCtx, Ctx.IntTy, Count);
+    B = B.addBinding(baseR, BindingKey::Default, V);
+    return;
+  }
+
+  if (!baseR->isBoundable())
+    return;
+
+  const TypedValueRegion *TR = cast<TypedValueRegion>(baseR);
+  QualType T = TR->getValueType();
+
+  if (isInitiallyIncludedGlobalRegion(baseR)) {
+    // If the region is a global and we are invalidating all globals,
+    // erasing the entry is good enough.  This causes all globals to be lazily
+    // symbolicated from the same base symbol.
+    return;
+  }
+
+  if (T->isStructureOrClassType()) {
+    // Invalidate the region by setting its default value to a
+    // conjured symbol. The type of the symbol is irrelevant.
+    DefinedOrUnknownSVal V = svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
+                                                          Ctx.IntTy, Count);
+    B = B.addBinding(baseR, BindingKey::Default, V);
+    return;
+  }
+
+  if (const ArrayType *AT = Ctx.getAsArrayType(T)) {
+    // Set the default value of the array to a conjured symbol.
+    DefinedOrUnknownSVal V =
+      svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
+                                   AT->getElementType(), Count);
+    B = B.addBinding(baseR, BindingKey::Default, V);
+    return;
+  }
+
+  DefinedOrUnknownSVal V = svalBuilder.conjureSymbolVal(baseR, Ex, LCtx,
+                                                        T, Count);
+  assert(SymbolManager::canSymbolicate(T) || V.isUnknown());
+  B = B.addBinding(baseR, BindingKey::Direct, V);
+}
+
+RegionBindingsRef
+RegionStoreManager::invalidateGlobalRegion(MemRegion::Kind K,
+                                           const Expr *Ex,
+                                           unsigned Count,
+                                           const LocationContext *LCtx,
+                                           RegionBindingsRef B,
+                                           InvalidatedRegions *Invalidated) {
+  // Bind the globals memory space to a new symbol that we will use to derive
+  // the bindings for all globals.
+  const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion(K);
+  SVal V = svalBuilder.conjureSymbolVal(/* SymbolTag = */ (const void*) GS,
+                                        Ex, LCtx,
+                                        /* type does not matter */ Ctx.IntTy,
+                                        Count);
+
+  B = B.removeBinding(GS)
+       .addBinding(BindingKey::Make(GS, BindingKey::Default), V);
+
+  // Even if there are no bindings in the global scope, we still need to
+  // record that we touched it.
+  if (Invalidated)
+    Invalidated->push_back(GS);
+
+  return B;
+}
+
+void RegionStoreManager::populateWorkList(invalidateRegionsWorker &W,
+                                          ArrayRef<SVal> Values,
+                                          bool IsArrayOfConstRegions,
+                                          InvalidatedRegions *TopLevelRegions) {
+  for (ArrayRef<SVal>::iterator I = Values.begin(),
+                                E = Values.end(); I != E; ++I) {
+    SVal V = *I;
+    if (Optional<nonloc::LazyCompoundVal> LCS =
+        V.getAs<nonloc::LazyCompoundVal>()) {
+
+      const SValListTy &Vals = getInterestingValues(*LCS);
+
+      for (SValListTy::const_iterator I = Vals.begin(),
+                                      E = Vals.end(); I != E; ++I) {
+        // Note: the last argument is false here because these are
+        // non-top-level regions.
+        if (const MemRegion *R = (*I).getAsRegion())
+          W.AddToWorkList(R, /*IsConst=*/ false);
+      }
+      continue;
+    }
+
+    if (const MemRegion *R = V.getAsRegion()) {
+      if (TopLevelRegions)
+        TopLevelRegions->push_back(R);
+      W.AddToWorkList(R, /*IsConst=*/ IsArrayOfConstRegions);
+      continue;
+    }
+  }
+}
+
+StoreRef
+RegionStoreManager::invalidateRegions(Store store,
+                                      ArrayRef<SVal> Values,
+                                      ArrayRef<SVal> ConstValues,
+                                      const Expr *Ex, unsigned Count,
+                                      const LocationContext *LCtx,
+                                      const CallEvent *Call,
+                                      InvalidatedSymbols &IS,
+                                      InvalidatedSymbols &ConstIS,
+                                      InvalidatedRegions *TopLevelRegions,
+                                      InvalidatedRegions *TopLevelConstRegions,
+                                      InvalidatedRegions *Invalidated) {
+  GlobalsFilterKind GlobalsFilter;
+  if (Call) {
+    if (Call->isInSystemHeader())
+      GlobalsFilter = GFK_SystemOnly;
+    else
+      GlobalsFilter = GFK_All;
+  } else {
+    GlobalsFilter = GFK_None;
+  }
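+  // For example (illustrative): a call to a function declared in a system
+  // header invalidates only system globals, a call to an ordinary user
+  // function invalidates all non-static, non-immutable globals, and with no
+  // call no globals are invalidated implicitly.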
+
+  RegionBindingsRef B = getRegionBindings(store);
+  invalidateRegionsWorker W(*this, StateMgr, B, Ex, Count, LCtx, IS, ConstIS,
+                            Invalidated, GlobalsFilter);
+
+  // Scan the bindings and generate the clusters.
+  W.GenerateClusters();
+
+  // Add the regions to the worklist.
+  populateWorkList(W, Values, /*IsArrayOfConstRegions*/ false,
+                   TopLevelRegions);
+  populateWorkList(W, ConstValues, /*IsArrayOfConstRegions*/ true,
+                   TopLevelConstRegions);
+
+  W.RunWorkList();
+
+  // Return the new bindings.
+  B = W.getRegionBindings();
+
+  // For calls, determine which global regions should be invalidated and
+  // invalidate them. (Note that function-static and immutable globals are never
+  // invalidated by this.)
+  // TODO: This could possibly be more precise with modules.
+  switch (GlobalsFilter) {
+  case GFK_All:
+    B = invalidateGlobalRegion(MemRegion::GlobalInternalSpaceRegionKind,
+                               Ex, Count, LCtx, B, Invalidated);
+    // FALLTHROUGH
+  case GFK_SystemOnly:
+    B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind,
+                               Ex, Count, LCtx, B, Invalidated);
+    // FALLTHROUGH
+  case GFK_None:
+    break;
+  }
+
+  return StoreRef(B.asStore(), *this);
+}
+
+//===----------------------------------------------------------------------===//
+// Extents for regions.
+//===----------------------------------------------------------------------===//
+
+DefinedOrUnknownSVal
+RegionStoreManager::getSizeInElements(ProgramStateRef state,
+                                      const MemRegion *R,
+                                      QualType EleTy) {
+  SVal Size = cast<SubRegion>(R)->getExtent(svalBuilder);
+  const llvm::APSInt *SizeInt = svalBuilder.getKnownValue(state, Size);
+  if (!SizeInt)
+    return UnknownVal();
+
+  CharUnits RegionSize = CharUnits::fromQuantity(SizeInt->getSExtValue());
+
+  if (Ctx.getAsVariableArrayType(EleTy)) {
+    // FIXME: We need to track extra state to properly record the size
+    // of VLAs.  Returning UnknownVal here, however, is a stop-gap so that
+    // we don't have a divide-by-zero below.
+    return UnknownVal();
+  }
+
+  CharUnits EleSize = Ctx.getTypeSizeInChars(EleTy);
+
+  // If the region is reinterpreted as a type whose size does not evenly
+  // divide the region's size, round the element count down.
+  // This is a signed value, since it's used in arithmetic with signed indices.
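+  // For example (illustrative): a 10-byte region viewed as an array of
+  // 4-byte ints yields 2 elements; the trailing 2 bytes are ignored.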
+  return svalBuilder.makeIntVal(RegionSize / EleSize, false);
+}
+
+//===----------------------------------------------------------------------===//
+// Location and region casting.
+//===----------------------------------------------------------------------===//
+
+/// ArrayToPointer - Emulates the "decay" of an array to a pointer
+///  type.  'Array' represents the lvalue of the array being decayed
+///  to a pointer, and the returned SVal represents the decayed
+///  version of that lvalue (i.e., a pointer to the first element of
+///  the array).  This is called by ExprEngine when evaluating casts
+///  from arrays to pointers.
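+///
+///  For example (illustrative): given 'int a[10];', evaluating 'a' in
+///  'int *p = a;' decays to '&a[0]', which we model as an ElementRegion
+///  with index 0 over the array's region.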
+SVal RegionStoreManager::ArrayToPointer(Loc Array) {
+  if (!Array.getAs<loc::MemRegionVal>())
+    return UnknownVal();
+
+  const MemRegion* R = Array.castAs<loc::MemRegionVal>().getRegion();
+  const TypedValueRegion* ArrayR = dyn_cast<TypedValueRegion>(R);
+
+  if (!ArrayR)
+    return UnknownVal();
+
+  // Strip off typedefs from the ArrayRegion's ValueType.
+  QualType T = ArrayR->getValueType().getDesugaredType(Ctx);
+  const ArrayType *AT = cast<ArrayType>(T);
+  T = AT->getElementType();
+
+  NonLoc ZeroIdx = svalBuilder.makeZeroArrayIndex();
+  return loc::MemRegionVal(MRMgr.getElementRegion(T, ZeroIdx, ArrayR, Ctx));
+}
+
+//===----------------------------------------------------------------------===//
+// Loading values from regions.
+//===----------------------------------------------------------------------===//
+
+SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T) {
+  assert(!L.getAs<UnknownVal>() && "location unknown");
+  assert(!L.getAs<UndefinedVal>() && "location undefined");
+
+  // For access to concrete addresses, return UnknownVal.  Checks
+  // for null dereferences (and similar errors) are done by checkers, not
+  // the Store.
+  // FIXME: We can consider lazily symbolicating such memory, but we really
+  // should defer this when we can reason easily about symbolicating arrays
+  // of bytes.
+  if (L.getAs<loc::ConcreteInt>()) {
+    return UnknownVal();
+  }
+  if (!L.getAs<loc::MemRegionVal>()) {
+    return UnknownVal();
+  }
+
+  const MemRegion *MR = L.castAs<loc::MemRegionVal>().getRegion();
+
+  if (isa<AllocaRegion>(MR) ||
+      isa<SymbolicRegion>(MR) ||
+      isa<CodeTextRegion>(MR)) {
+    if (T.isNull()) {
+      if (const TypedRegion *TR = dyn_cast<TypedRegion>(MR))
+        T = TR->getLocationType();
+      else {
+        const SymbolicRegion *SR = cast<SymbolicRegion>(MR);
+        T = SR->getSymbol()->getType();
+      }
+    }
+    MR = GetElementZeroRegion(MR, T);
+  }
+
+  // FIXME: Perhaps this method should just take a 'const MemRegion*' argument
+  //  instead of 'Loc', and have the other Loc cases handled at a higher level.
+  const TypedValueRegion *R = cast<TypedValueRegion>(MR);
+  QualType RTy = R->getValueType();
+
+  // FIXME: we do not yet model the parts of a complex type, so treat the
+  // whole thing as "unknown".
+  if (RTy->isAnyComplexType())
+    return UnknownVal();
+
+  // FIXME: We should eventually handle funny addressing.  e.g.:
+  //
+  //   int x = ...;
+  //   int *p = &x;
+  //   char *q = (char*) p;
+  //   char c = *q;  // returns the first byte of 'x'.
+  //
+  // Such funny addressing will occur due to layering of regions.
+  if (RTy->isStructureOrClassType())
+    return getBindingForStruct(B, R);
+
+  // FIXME: Handle unions.
+  if (RTy->isUnionType())
+    return UnknownVal();
+
+  if (RTy->isArrayType()) {
+    if (RTy->isConstantArrayType())
+      return getBindingForArray(B, R);
+    else
+      return UnknownVal();
+  }
+
+  // FIXME: handle Vector types.
+  if (RTy->isVectorType())
+    return UnknownVal();
+
+  if (const FieldRegion* FR = dyn_cast<FieldRegion>(R))
+    return CastRetrievedVal(getBindingForField(B, FR), FR, T, false);
+
+  if (const ElementRegion* ER = dyn_cast<ElementRegion>(R)) {
+    // FIXME: Here we actually perform an implicit conversion from the loaded
+    // value to the element type.  Eventually we want to compose these values
+    // more intelligently.  For example, an 'element' can encompass multiple
+    // bound regions (e.g., several bound bytes), or could be a subset of
+    // a larger value.
+    return CastRetrievedVal(getBindingForElement(B, ER), ER, T, false);
+  }
+
+  if (const ObjCIvarRegion *IVR = dyn_cast<ObjCIvarRegion>(R)) {
+    // FIXME: Here we actually perform an implicit conversion from the loaded
+    // value to the ivar type.  What we should model is stores to ivars
+    // that blow past the extent of the ivar.  If the address of the ivar is
+    // reinterpreted, it is possible we stored a different value that could
+    // fit within the ivar.  Either we need to cast these when storing them
+    // or reinterpret them lazily (as we do here).
+    return CastRetrievedVal(getBindingForObjCIvar(B, IVR), IVR, T, false);
+  }
+
+  if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+    // FIXME: Here we actually perform an implicit conversion from the loaded
+    // value to the variable type.  What we should model is stores to variables
+    // that blow past the extent of the variable.  If the address of the
+    // variable is reinterpreted, it is possible we stored a different value
+    // that could fit within the variable.  Either we need to cast these when
+    // storing them or reinterpret them lazily (as we do here).
+    return CastRetrievedVal(getBindingForVar(B, VR), VR, T, false);
+  }
+
+  const SVal *V = B.lookup(R, BindingKey::Direct);
+
+  // Check if the region has a binding.
+  if (V)
+    return *V;
+
+  // The location does not have a bound value.  This means that it has
+  // the value it had upon its creation and/or entry to the analyzed
+  // function/method.  These are either symbolic values or 'undefined'.
+  if (R->hasStackNonParametersStorage()) {
+    // All stack variables are considered to have undefined values
+    // upon creation.  All heap allocated blocks are considered to
+    // have undefined values as well unless they are explicitly bound
+    // to specific values.
+    return UndefinedVal();
+  }
+
+  // All other values are symbolic.
+  return svalBuilder.getRegionValueSymbolVal(R);
+}
+
+static QualType getUnderlyingType(const SubRegion *R) {
+  QualType RegionTy;
+  if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(R))
+    RegionTy = TVR->getValueType();
+
+  if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+    RegionTy = SR->getSymbol()->getType();
+
+  return RegionTy;
+}
+
+/// Checks to see if store \p B has a lazy binding for region \p R.
+///
+/// If \p AllowSubregionBindings is \c false, a lazy binding will be rejected
+/// if there are additional bindings within \p R.
+///
+/// Note that unlike RegionStoreManager::findLazyBinding, this will not search
+/// for lazy bindings for super-regions of \p R.
+static Optional<nonloc::LazyCompoundVal>
+getExistingLazyBinding(SValBuilder &SVB, RegionBindingsConstRef B,
+                       const SubRegion *R, bool AllowSubregionBindings) {
+  Optional<SVal> V = B.getDefaultBinding(R);
+  if (!V)
+    return None;
+
+  Optional<nonloc::LazyCompoundVal> LCV = V->getAs<nonloc::LazyCompoundVal>();
+  if (!LCV)
+    return None;
+
+  // If the LCV is for a subregion, the types might not match, and we shouldn't
+  // reuse the binding.
+  QualType RegionTy = getUnderlyingType(R);
+  if (!RegionTy.isNull() &&
+      !RegionTy->isVoidPointerType()) {
+    QualType SourceRegionTy = LCV->getRegion()->getValueType();
+    if (!SVB.getContext().hasSameUnqualifiedType(RegionTy, SourceRegionTy))
+      return None;
+  }
+
+  if (!AllowSubregionBindings) {
+    // If there are any other bindings within this region, we shouldn't reuse
+    // the top-level binding.
+    SmallVector<BindingPair, 16> Bindings;
+    collectSubRegionBindings(Bindings, SVB, *B.lookup(R->getBaseRegion()), R,
+                             /*IncludeAllDefaultBindings=*/true);
+    if (Bindings.size() > 1)
+      return None;
+  }
+
+  return *LCV;
+}
+
+
+std::pair<Store, const SubRegion *>
+RegionStoreManager::findLazyBinding(RegionBindingsConstRef B,
+                                   const SubRegion *R,
+                                   const SubRegion *originalRegion) {
+  if (originalRegion != R) {
+    if (Optional<nonloc::LazyCompoundVal> V =
+          getExistingLazyBinding(svalBuilder, B, R, true))
+      return std::make_pair(V->getStore(), V->getRegion());
+  }
+
+  typedef std::pair<Store, const SubRegion *> StoreRegionPair;
+  StoreRegionPair Result = StoreRegionPair();
+
+  if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+    Result = findLazyBinding(B, cast<SubRegion>(ER->getSuperRegion()),
+                             originalRegion);
+
+    if (Result.second)
+      Result.second = MRMgr.getElementRegionWithSuper(ER, Result.second);
+
+  } else if (const FieldRegion *FR = dyn_cast<FieldRegion>(R)) {
+    Result = findLazyBinding(B, cast<SubRegion>(FR->getSuperRegion()),
+                             originalRegion);
+
+    if (Result.second)
+      Result.second = MRMgr.getFieldRegionWithSuper(FR, Result.second);
+
+  } else if (const CXXBaseObjectRegion *BaseReg =
+               dyn_cast<CXXBaseObjectRegion>(R)) {
+    // A C++ base object region is another kind of region that we should blast
+    // through to look for a lazy compound value.  It behaves like a field
+    // region.
+    Result = findLazyBinding(B, cast<SubRegion>(BaseReg->getSuperRegion()),
+                             originalRegion);
+    
+    if (Result.second)
+      Result.second = MRMgr.getCXXBaseObjectRegionWithSuper(BaseReg,
+                                                            Result.second);
+  }
+
+  return Result;
+}
+
+SVal RegionStoreManager::getBindingForElement(RegionBindingsConstRef B,
+                                              const ElementRegion* R) {
+  // We do not currently model bindings of the CompoundLiteralRegion.
+  if (isa<CompoundLiteralRegion>(R->getBaseRegion()))
+    return UnknownVal();
+
+  // Check if the region has a binding.
+  if (const Optional<SVal> &V = B.getDirectBinding(R))
+    return *V;
+
+  const MemRegion* superR = R->getSuperRegion();
+
+  // Check if the region is an element region of a string literal.
+  if (const StringRegion *StrR = dyn_cast<StringRegion>(superR)) {
+    // FIXME: Handle loads from strings where the literal is treated as
+    // an integer, e.g., *((unsigned int*)"hello")
+    QualType T = Ctx.getAsArrayType(StrR->getValueType())->getElementType();
+    if (T != Ctx.getCanonicalType(R->getElementType()))
+      return UnknownVal();
+
+    const StringLiteral *Str = StrR->getStringLiteral();
+    SVal Idx = R->getIndex();
+    if (Optional<nonloc::ConcreteInt> CI = Idx.getAs<nonloc::ConcreteInt>()) {
+      int64_t i = CI->getValue().getSExtValue();
+      // Abort on string underrun.  This can happen with arbitrary
+      // clients of getBindingForElement().
+      if (i < 0)
+        return UndefinedVal();
+      int64_t length = Str->getLength();
+      // Technically, only i == length is guaranteed to be null.
+      // However, such overflows should be caught before reaching this point;
+      // the only time such an access would be made is if a string literal was
+      // used to initialize a larger array.
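+      // For example (illustrative): for 'char buf[10] = "foo";', a read of
+      // buf[7] indexes past the literal's length and yields '\0', matching
+      // the zero-fill semantics of such an initializer.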
+      char c = (i >= length) ? '\0' : Str->getCodeUnit(i);
+      return svalBuilder.makeIntVal(c, T);
+    }
+  }
+  
+  // Check for loads from a code text region.  For such loads, just give up.
+  if (isa<CodeTextRegion>(superR))
+    return UnknownVal();
+
+  // Handle the case where we are indexing into a larger scalar object.
+  // For example, this handles:
+  //   int x = ...
+  //   char *y = &x;
+  //   return *y;
+  // FIXME: This is a hack, and doesn't do anything really intelligent yet.
+  const RegionRawOffset &O = R->getAsArrayOffset();
+  
+  // If we cannot reason about the offset, return an unknown value.
+  if (!O.getRegion())
+    return UnknownVal();
+  
+  if (const TypedValueRegion *baseR = 
+        dyn_cast_or_null<TypedValueRegion>(O.getRegion())) {
+    QualType baseT = baseR->getValueType();
+    if (baseT->isScalarType()) {
+      QualType elemT = R->getElementType();
+      if (elemT->isScalarType()) {
+        if (Ctx.getTypeSizeInChars(baseT) >= Ctx.getTypeSizeInChars(elemT)) {
+          if (const Optional<SVal> &V = B.getDirectBinding(superR)) {
+            if (SymbolRef parentSym = V->getAsSymbol())
+              return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
+
+            if (V->isUnknownOrUndef())
+              return *V;
+            // Other cases: give up.  We are indexing into a larger object
+            // that has some value, but we don't know how to handle that yet.
+            return UnknownVal();
+          }
+        }
+      }
+    }
+  }
+  return getBindingForFieldOrElementCommon(B, R, R->getElementType());
+}
+
+SVal RegionStoreManager::getBindingForField(RegionBindingsConstRef B,
+                                            const FieldRegion* R) {
+
+  // Check if the region has a binding.
+  if (const Optional<SVal> &V = B.getDirectBinding(R))
+    return *V;
+
+  QualType Ty = R->getValueType();
+  return getBindingForFieldOrElementCommon(B, R, Ty);
+}
+
+Optional<SVal>
+RegionStoreManager::getBindingForDerivedDefaultValue(RegionBindingsConstRef B,
+                                                     const MemRegion *superR,
+                                                     const TypedValueRegion *R,
+                                                     QualType Ty) {
+
+  if (const Optional<SVal> &D = B.getDefaultBinding(superR)) {
+    const SVal &val = D.getValue();
+    if (SymbolRef parentSym = val.getAsSymbol())
+      return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
+
+    if (val.isZeroConstant())
+      return svalBuilder.makeZeroVal(Ty);
+
+    if (val.isUnknownOrUndef())
+      return val;
+
+    // Lazy bindings are usually handled through getExistingLazyBinding().
+    // We should unify these two code paths at some point.
+    if (val.getAs<nonloc::LazyCompoundVal>())
+      return val;
+
+    llvm_unreachable("Unknown default value");
+  }
+
+  return None;
+}
+
+SVal RegionStoreManager::getLazyBinding(const SubRegion *LazyBindingRegion,
+                                        RegionBindingsRef LazyBinding) {
+  SVal Result;
+  if (const ElementRegion *ER = dyn_cast<ElementRegion>(LazyBindingRegion))
+    Result = getBindingForElement(LazyBinding, ER);
+  else
+    Result = getBindingForField(LazyBinding,
+                                cast<FieldRegion>(LazyBindingRegion));
+
+  // FIXME: This is a hack to deal with RegionStore's inability to distinguish a
+  // default value for /part/ of an aggregate from a default value for the
+  // /entire/ aggregate. The most common case of this is when struct Outer
+  // has as its first member a struct Inner, which is copied in from a stack
+  // variable. In this case, even if the Outer's default value is symbolic, 0,
+  // or unknown, it gets overridden by the Inner's default value of undefined.
+  //
+  // This is a general problem -- if the Inner is zero-initialized, the Outer
+  // will now look zero-initialized. The proper way to solve this is with a
+  // new version of RegionStore that tracks the extent of a binding as well
+  // as the offset.
+  //
+  // This hack only takes care of the undefined case because that can very
+  // quickly result in a warning.
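+  // Illustrative example (hypothetical code):
+  //   struct Inner { int x; };
+  //   struct Outer { Inner in; int y; };
+  // Copying a stack-allocated Inner into an Outer's first member installs a
+  // LazyCompoundVal whose backing store treats unbound fields as undefined,
+  // which can shadow the Outer's own symbolic, zero, or unknown default.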
+  if (Result.isUndef())
+    Result = UnknownVal();
+
+  return Result;
+}
+                                        
+SVal
+RegionStoreManager::getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
+                                                      const TypedValueRegion *R,
+                                                      QualType Ty) {
+
+  // At this point we have already checked in either getBindingForElement or
+  // getBindingForField if 'R' has a direct binding.
+
+  // Lazy binding?
+  Store lazyBindingStore = NULL;
+  const SubRegion *lazyBindingRegion = NULL;
+  llvm::tie(lazyBindingStore, lazyBindingRegion) = findLazyBinding(B, R, R);
+  if (lazyBindingRegion)
+    return getLazyBinding(lazyBindingRegion,
+                          getRegionBindings(lazyBindingStore));
+
+  // Record whether or not we see a symbolic index.  Such an access may be
+  // entirely outside the scope of our lookup.
+  bool hasSymbolicIndex = false;
+
+  // FIXME: This is a hack to deal with RegionStore's inability to distinguish
+  // a default value for /part/ of an aggregate from a default value for the
+  // /entire/ aggregate; see the extended comment in getLazyBinding() above.
+  // As there, this hack only takes care of the undefined case because that
+  // can very quickly result in a warning.
+  bool hasPartialLazyBinding = false;
+
+  const SubRegion *SR = dyn_cast<SubRegion>(R);
+  while (SR) {
+    const MemRegion *Base = SR->getSuperRegion();
+    if (Optional<SVal> D = getBindingForDerivedDefaultValue(B, Base, R, Ty)) {
+      if (D->getAs<nonloc::LazyCompoundVal>()) {
+        hasPartialLazyBinding = true;
+        break;
+      }
+
+      return *D;
+    }
+
+    if (const ElementRegion *ER = dyn_cast<ElementRegion>(Base)) {
+      NonLoc index = ER->getIndex();
+      if (!index.isConstant())
+        hasSymbolicIndex = true;
+    }
+    
+    // If our super region is a field or element itself, walk up the region
+    // hierarchy to see if there is a default value installed in an ancestor.
+    SR = dyn_cast<SubRegion>(Base);
+  }
+
+  if (R->hasStackNonParametersStorage()) {
+    if (isa<ElementRegion>(R)) {
+      // Currently we don't reason specially about Clang-style vectors.  Check
+      // if superR is a vector and if so return Unknown.
+      if (const TypedValueRegion *typedSuperR = 
+            dyn_cast<TypedValueRegion>(R->getSuperRegion())) {
+        if (typedSuperR->getValueType()->isVectorType())
+          return UnknownVal();
+      }
+    }
+
+    // FIXME: We also need to take ElementRegions with symbolic indexes into
+    // account.  This case handles both direct accesses to an ElementRegion
+    // with a symbolic offset and fields within an element that has a
+    // symbolic offset.
+    if (hasSymbolicIndex)
+      return UnknownVal();
+
+    if (!hasPartialLazyBinding)
+      return UndefinedVal();
+  }
+
+  // All other values are symbolic.
+  return svalBuilder.getRegionValueSymbolVal(R);
+}
+
+SVal RegionStoreManager::getBindingForObjCIvar(RegionBindingsConstRef B,
+                                               const ObjCIvarRegion* R) {
+  // Check if the region has a binding.
+  if (const Optional<SVal> &V = B.getDirectBinding(R))
+    return *V;
+
+  const MemRegion *superR = R->getSuperRegion();
+
+  // Check if the super region has a default binding.
+  if (const Optional<SVal> &V = B.getDefaultBinding(superR)) {
+    if (SymbolRef parentSym = V->getAsSymbol())
+      return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
+
+    // Other cases: give up.
+    return UnknownVal();
+  }
+
+  return getBindingForLazySymbol(R);
+}
+
+SVal RegionStoreManager::getBindingForVar(RegionBindingsConstRef B,
+                                          const VarRegion *R) {
+
+  // Check if the region has a binding.
+  if (const Optional<SVal> &V = B.getDirectBinding(R))
+    return *V;
+
+  // Lazily derive a value for the VarRegion.
+  const VarDecl *VD = R->getDecl();
+  const MemSpaceRegion *MS = R->getMemorySpace();
+
+  // Arguments are always symbolic.
+  if (isa<StackArgumentsSpaceRegion>(MS))
+    return svalBuilder.getRegionValueSymbolVal(R);
+
+  // Is 'VD' declared constant?  If so, retrieve the constant value.
+  if (VD->getType().isConstQualified())
+    if (const Expr *Init = VD->getInit())
+      if (Optional<SVal> V = svalBuilder.getConstantVal(Init))
+        return *V;
+
+  // This must come after the check for constants because closure-captured
+  // constant variables may appear in UnknownSpaceRegion.
+  if (isa<UnknownSpaceRegion>(MS))
+    return svalBuilder.getRegionValueSymbolVal(R);
+
+  if (isa<GlobalsSpaceRegion>(MS)) {
+    QualType T = VD->getType();
+
+    // Function-scoped static variables are default-initialized to 0; if they
+    // have an initializer, it would have been processed by now.
+    if (isa<StaticGlobalSpaceRegion>(MS))
+      return svalBuilder.makeZeroVal(T);
+
+    if (Optional<SVal> V = getBindingForDerivedDefaultValue(B, MS, R, T)) {
+      assert(!V->getAs<nonloc::LazyCompoundVal>());
+      return V.getValue();
+    }
+
+    return svalBuilder.getRegionValueSymbolVal(R);
+  }
+
+  return UndefinedVal();
+}
+
+SVal RegionStoreManager::getBindingForLazySymbol(const TypedValueRegion *R) {
+  // All other values are symbolic.
+  return svalBuilder.getRegionValueSymbolVal(R);
+}
+
+const RegionStoreManager::SValListTy &
+RegionStoreManager::getInterestingValues(nonloc::LazyCompoundVal LCV) {
+  // First, check the cache.
+  LazyBindingsMapTy::iterator I = LazyBindingsMap.find(LCV.getCVData());
+  if (I != LazyBindingsMap.end())
+    return I->second;
+
+  // If we don't have a list of values cached, start constructing it.
+  SValListTy List;
+
+  const SubRegion *LazyR = LCV.getRegion();
+  RegionBindingsRef B = getRegionBindings(LCV.getStore());
+
+  // If this region had /no/ bindings at the time, there are no interesting
+  // values to return.
+  const ClusterBindings *Cluster = B.lookup(LazyR->getBaseRegion());
+  if (!Cluster)
+    return (LazyBindingsMap[LCV.getCVData()] = llvm_move(List));
+
+  SmallVector<BindingPair, 32> Bindings;
+  collectSubRegionBindings(Bindings, svalBuilder, *Cluster, LazyR,
+                           /*IncludeAllDefaultBindings=*/true);
+  for (SmallVectorImpl<BindingPair>::const_iterator I = Bindings.begin(),
+                                                    E = Bindings.end();
+       I != E; ++I) {
+    SVal V = I->second;
+    if (V.isUnknownOrUndef() || V.isConstant())
+      continue;
+
+    if (Optional<nonloc::LazyCompoundVal> InnerLCV =
+            V.getAs<nonloc::LazyCompoundVal>()) {
+      const SValListTy &InnerList = getInterestingValues(*InnerLCV);
+      List.insert(List.end(), InnerList.begin(), InnerList.end());
+      continue;
+    }
+    
+    List.push_back(V);
+  }
+
+  return (LazyBindingsMap[LCV.getCVData()] = llvm_move(List));
+}
+
+NonLoc RegionStoreManager::createLazyBinding(RegionBindingsConstRef B,
+                                             const TypedValueRegion *R) {
+  if (Optional<nonloc::LazyCompoundVal> V =
+        getExistingLazyBinding(svalBuilder, B, R, false))
+    return *V;
+
+  return svalBuilder.makeLazyCompoundVal(StoreRef(B.asStore(), *this), R);
+}
+
+SVal RegionStoreManager::getBindingForStruct(RegionBindingsConstRef B,
+                                             const TypedValueRegion *R) {
+  const RecordDecl *RD = R->getValueType()->castAs<RecordType>()->getDecl();
+  if (RD->field_empty())
+    return UnknownVal();
+
+  return createLazyBinding(B, R);
+}
+
+SVal RegionStoreManager::getBindingForArray(RegionBindingsConstRef B,
+                                            const TypedValueRegion *R) {
+  assert(Ctx.getAsConstantArrayType(R->getValueType()) &&
+         "Only constant array types can have compound bindings.");
+  
+  return createLazyBinding(B, R);
+}
+
+bool RegionStoreManager::includedInBindings(Store store,
+                                            const MemRegion *region) const {
+  RegionBindingsRef B = getRegionBindings(store);
+  region = region->getBaseRegion();
+
+  // Quick path: if the base is the head of a cluster, the region is live.
+  if (B.lookup(region))
+    return true;
+
+  // Slow path: if the region is the VALUE of any binding, it is live.
+  for (RegionBindingsRef::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI) {
+    const ClusterBindings &Cluster = RI.getData();
+    for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
+         CI != CE; ++CI) {
+      const SVal &D = CI.getData();
+      if (const MemRegion *R = D.getAsRegion())
+        if (R->getBaseRegion() == region)
+          return true;
+    }
+  }
+
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Binding values to regions.
+//===----------------------------------------------------------------------===//
+
+StoreRef RegionStoreManager::killBinding(Store ST, Loc L) {
+  if (Optional<loc::MemRegionVal> LV = L.getAs<loc::MemRegionVal>())
+    if (const MemRegion* R = LV->getRegion())
+      return StoreRef(getRegionBindings(ST).removeBinding(R)
+                                           .asImmutableMap()
+                                           .getRootWithoutRetain(),
+                      *this);
+
+  return StoreRef(ST, *this);
+}
+
+RegionBindingsRef
+RegionStoreManager::bind(RegionBindingsConstRef B, Loc L, SVal V) {
+  if (L.getAs<loc::ConcreteInt>())
+    return B;
+
+  // If we get here, the location should be a region.
+  const MemRegion *R = L.castAs<loc::MemRegionVal>().getRegion();
+
+  // Check if the region is a struct region.
+  if (const TypedValueRegion* TR = dyn_cast<TypedValueRegion>(R)) {
+    QualType Ty = TR->getValueType();
+    if (Ty->isArrayType())
+      return bindArray(B, TR, V);
+    if (Ty->isStructureOrClassType())
+      return bindStruct(B, TR, V);
+    if (Ty->isVectorType())
+      return bindVector(B, TR, V);
+  }
+
+  if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+    // Binding directly to a symbolic region should be treated as binding
+    // to element 0.
+    QualType T = SR->getSymbol()->getType();
+    if (T->isAnyPointerType() || T->isReferenceType())
+      T = T->getPointeeType();
+
+    R = GetElementZeroRegion(SR, T);
+  }
+
+  // Clear out bindings that may overlap with this binding.
+  RegionBindingsRef NewB = removeSubRegionBindings(B, cast<SubRegion>(R));
+  return NewB.addBinding(BindingKey::Make(R, BindingKey::Direct), V);
+}
+
+RegionBindingsRef
+RegionStoreManager::setImplicitDefaultValue(RegionBindingsConstRef B,
+                                            const MemRegion *R,
+                                            QualType T) {
+  SVal V;
+
+  if (Loc::isLocType(T))
+    V = svalBuilder.makeNull();
+  else if (T->isIntegralOrEnumerationType())
+    V = svalBuilder.makeZeroVal(T);
+  else if (T->isStructureOrClassType() || T->isArrayType()) {
+    // Set the default value to a zero constant when it is a structure
+    // or array.  The type doesn't really matter.
+    V = svalBuilder.makeZeroVal(Ctx.IntTy);
+  }
+  else {
+    // We can't represent values of this type, but we still need to set a value
+    // to record that the region has been initialized.
+    // If this assertion ever fires, a new case should be added above -- we
+    // should know how to default-initialize any value we can symbolicate.
+    assert(!SymbolManager::canSymbolicate(T) && "This type is representable");
+    V = UnknownVal();
+  }
+
+  return B.addBinding(R, BindingKey::Default, V);
+}
+
+RegionBindingsRef
+RegionStoreManager::bindArray(RegionBindingsConstRef B,
+                              const TypedValueRegion* R,
+                              SVal Init) {
+
+  const ArrayType *AT =
+    cast<ArrayType>(Ctx.getCanonicalType(R->getValueType()));
+  QualType ElementTy = AT->getElementType();
+  Optional<uint64_t> Size;
+
+  if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(AT))
+    Size = CAT->getSize().getZExtValue();
+
+  // Check if the init expr is a string literal.
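+  // For example (illustrative): 'char buf[4] = "abc";' initializes the array
+  // from a StringRegion, which we bind below as a lazy copy of the literal.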
+  if (Optional<loc::MemRegionVal> MRV = Init.getAs<loc::MemRegionVal>()) {
+    const StringRegion *S = cast<StringRegion>(MRV->getRegion());
+
+    // Treat the string as a lazy compound value.
+    StoreRef store(B.asStore(), *this);
+    nonloc::LazyCompoundVal LCV = svalBuilder.makeLazyCompoundVal(store, S)
+        .castAs<nonloc::LazyCompoundVal>();
+    return bindAggregate(B, R, LCV);
+  }
+
+  // Handle lazy compound values.
+  if (Init.getAs<nonloc::LazyCompoundVal>())
+    return bindAggregate(B, R, Init);
+
+  // Remaining case: explicit compound values.
+
+  if (Init.isUnknown())
+    return setImplicitDefaultValue(B, R, ElementTy);
+
+  const nonloc::CompoundVal& CV = Init.castAs<nonloc::CompoundVal>();
+  nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+  uint64_t i = 0;
+
+  RegionBindingsRef NewB(B);
+
+  for (; Size.hasValue() ? i < Size.getValue() : true; ++i, ++VI) {
+    // The init list might be shorter than the array length.
+    if (VI == VE)
+      break;
+
+    const NonLoc &Idx = svalBuilder.makeArrayIndex(i);
+    const ElementRegion *ER = MRMgr.getElementRegion(ElementTy, Idx, R, Ctx);
+
+    if (ElementTy->isStructureOrClassType())
+      NewB = bindStruct(NewB, ER, *VI);
+    else if (ElementTy->isArrayType())
+      NewB = bindArray(NewB, ER, *VI);
+    else
+      NewB = bind(NewB, loc::MemRegionVal(ER), *VI);
+  }
+
+  // If the init list is shorter than the array length, set the
+  // array default value.
+  if (Size.hasValue() && i < Size.getValue())
+    NewB = setImplicitDefaultValue(NewB, R, ElementTy);
+
+  return NewB;
+}
+
+RegionBindingsRef RegionStoreManager::bindVector(RegionBindingsConstRef B,
+                                                 const TypedValueRegion* R,
+                                                 SVal V) {
+  QualType T = R->getValueType();
+  assert(T->isVectorType());
+  const VectorType *VT = T->getAs<VectorType>(); // Use getAs for typedefs.
+ 
+  // Handle lazy compound values and symbolic values.
+  if (V.getAs<nonloc::LazyCompoundVal>() || V.getAs<nonloc::SymbolVal>())
+    return bindAggregate(B, R, V);
+  
+  // We may get a non-CompoundVal accidentally, due to imprecise cast logic or
+  // because we are binding a symbolic struct value. Kill the field values,
+  // and if the value is symbolic, bind it as a "default" binding.
+  if (!V.getAs<nonloc::CompoundVal>()) {
+    return bindAggregate(B, R, UnknownVal());
+  }
+
+  QualType ElemType = VT->getElementType();
+  nonloc::CompoundVal CV = V.castAs<nonloc::CompoundVal>();
+  nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+  unsigned index = 0, numElements = VT->getNumElements();
+  RegionBindingsRef NewB(B);
+
+  for (; index != numElements; ++index) {
+    if (VI == VE)
+      break;
+    
+    NonLoc Idx = svalBuilder.makeArrayIndex(index);
+    const ElementRegion *ER = MRMgr.getElementRegion(ElemType, Idx, R, Ctx);
+
+    if (ElemType->isArrayType())
+      NewB = bindArray(NewB, ER, *VI);
+    else if (ElemType->isStructureOrClassType())
+      NewB = bindStruct(NewB, ER, *VI);
+    else
+      NewB = bind(NewB, loc::MemRegionVal(ER), *VI);
+  }
+  return NewB;
+}
+
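+// Illustrative behavior (not from the original source): for a struct with
+// only a few scalar fields, e.g. 'struct Point { int x, y; };', the helper
+// below copies each field's value out of the lazy compound value individually
+// instead of installing a single default binding for the whole object.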
+Optional<RegionBindingsRef>
+RegionStoreManager::tryBindSmallStruct(RegionBindingsConstRef B,
+                                       const TypedValueRegion *R,
+                                       const RecordDecl *RD,
+                                       nonloc::LazyCompoundVal LCV) {
+  FieldVector Fields;
+
+  if (const CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(RD))
+    if (Class->getNumBases() != 0 || Class->getNumVBases() != 0)
+      return None;
+
+  for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+       I != E; ++I) {
+    const FieldDecl *FD = *I;
+    if (FD->isUnnamedBitfield())
+      continue;
+
+    // If there are too many fields, or if any of the fields are aggregates,
+    // just use the LCV as a default binding.
+    if (Fields.size() == SmallStructLimit)
+      return None;
+
+    QualType Ty = FD->getType();
+    if (!(Ty->isScalarType() || Ty->isReferenceType()))
+      return None;
+
+    Fields.push_back(*I);
+  }
+
+  RegionBindingsRef NewB = B;
+  
+  for (FieldVector::iterator I = Fields.begin(), E = Fields.end(); I != E; ++I){
+    const FieldRegion *SourceFR = MRMgr.getFieldRegion(*I, LCV.getRegion());
+    SVal V = getBindingForField(getRegionBindings(LCV.getStore()), SourceFR);
+
+    const FieldRegion *DestFR = MRMgr.getFieldRegion(*I, R);
+    NewB = bind(NewB, loc::MemRegionVal(DestFR), V);
+  }
+
+  return NewB;
+}
+
+RegionBindingsRef RegionStoreManager::bindStruct(RegionBindingsConstRef B,
+                                                 const TypedValueRegion* R,
+                                                 SVal V) {
+  if (!Features.supportsFields())
+    return B;
+
+  QualType T = R->getValueType();
+  assert(T->isStructureOrClassType());
+
+  const RecordType* RT = T->getAs<RecordType>();
+  const RecordDecl *RD = RT->getDecl();
+
+  if (!RD->isCompleteDefinition())
+    return B;
+
+  // Handle lazy compound values and symbolic values.
+  if (Optional<nonloc::LazyCompoundVal> LCV =
+        V.getAs<nonloc::LazyCompoundVal>()) {
+    if (Optional<RegionBindingsRef> NewB = tryBindSmallStruct(B, R, RD, *LCV))
+      return *NewB;
+    return bindAggregate(B, R, V);
+  }
+  if (V.getAs<nonloc::SymbolVal>())
+    return bindAggregate(B, R, V);
+
+  // We may get a non-CompoundVal accidentally, due to imprecise cast logic or
+  // because we are binding a symbolic struct value. Kill the field values,
+  // and if the value is symbolic, bind it as a "default" binding.
+  if (V.isUnknown() || !V.getAs<nonloc::CompoundVal>())
+    return bindAggregate(B, R, UnknownVal());
+
+  const nonloc::CompoundVal& CV = V.castAs<nonloc::CompoundVal>();
+  nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+
+  RecordDecl::field_iterator FI, FE;
+  RegionBindingsRef NewB(B);
+
+  for (FI = RD->field_begin(), FE = RD->field_end(); FI != FE; ++FI) {
+
+    if (VI == VE)
+      break;
+
+    // Skip any unnamed bitfields to stay in sync with the initializers.
+    if (FI->isUnnamedBitfield())
+      continue;
+
+    QualType FTy = FI->getType();
+    const FieldRegion* FR = MRMgr.getFieldRegion(*FI, R);
+
+    if (FTy->isArrayType())
+      NewB = bindArray(NewB, FR, *VI);
+    else if (FTy->isStructureOrClassType())
+      NewB = bindStruct(NewB, FR, *VI);
+    else
+      NewB = bind(NewB, loc::MemRegionVal(FR), *VI);
+    ++VI;
+  }
+
+  // There may be fewer values in the initializer list than there are fields
+  // in the struct.
+  if (FI != FE) {
+    NewB = NewB.addBinding(R, BindingKey::Default,
+                           svalBuilder.makeIntVal(0, false));
+  }
+
+  return NewB;
+}
+
+RegionBindingsRef
+RegionStoreManager::bindAggregate(RegionBindingsConstRef B,
+                                  const TypedRegion *R,
+                                  SVal Val) {
+  // Remove the old bindings, using 'R' as the root of all regions
+  // we will invalidate. Then add the new binding.
+  return removeSubRegionBindings(B, R).addBinding(R, BindingKey::Default, Val);
+}
+
+//===----------------------------------------------------------------------===//
+// State pruning.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class removeDeadBindingsWorker :
+  public ClusterAnalysis<removeDeadBindingsWorker> {
+  SmallVector<const SymbolicRegion*, 12> Postponed;
+  SymbolReaper &SymReaper;
+  const StackFrameContext *CurrentLCtx;
+
+public:
+  removeDeadBindingsWorker(RegionStoreManager &rm,
+                           ProgramStateManager &stateMgr,
+                           RegionBindingsRef b, SymbolReaper &symReaper,
+                           const StackFrameContext *LCtx)
+    : ClusterAnalysis<removeDeadBindingsWorker>(rm, stateMgr, b, GFK_None),
+      SymReaper(symReaper), CurrentLCtx(LCtx) {}
+
+  // Called by ClusterAnalysis.
+  void VisitAddedToCluster(const MemRegion *baseR, const ClusterBindings &C);
+  void VisitCluster(const MemRegion *baseR, const ClusterBindings *C);
+  using ClusterAnalysis<removeDeadBindingsWorker>::VisitCluster;
+
+  bool UpdatePostponed();
+  void VisitBinding(SVal V);
+};
+}
+
+void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
+                                                   const ClusterBindings &C) {
+
+  if (const VarRegion *VR = dyn_cast<VarRegion>(baseR)) {
+    if (SymReaper.isLive(VR))
+      AddToWorkList(baseR, &C);
+
+    return;
+  }
+
+  if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR)) {
+    if (SymReaper.isLive(SR->getSymbol()))
+      AddToWorkList(SR, &C);
+    else
+      Postponed.push_back(SR);
+
+    return;
+  }
+
+  if (isa<NonStaticGlobalSpaceRegion>(baseR)) {
+    AddToWorkList(baseR, &C);
+    return;
+  }
+
+  // CXXThisRegion in the current or parent location context is live.
+  if (const CXXThisRegion *TR = dyn_cast<CXXThisRegion>(baseR)) {
+    const StackArgumentsSpaceRegion *StackReg =
+      cast<StackArgumentsSpaceRegion>(TR->getSuperRegion());
+    const StackFrameContext *RegCtx = StackReg->getStackFrame();
+    if (CurrentLCtx &&
+        (RegCtx == CurrentLCtx || RegCtx->isParentOf(CurrentLCtx)))
+      AddToWorkList(TR, &C);
+  }
+}
+
+void removeDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
+                                            const ClusterBindings *C) {
+  if (!C)
+    return;
+
+  // Mark the symbol for any SymbolicRegion with live bindings as live itself.
+  // This means we should continue to track that symbol.
+  if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(baseR))
+    SymReaper.markLive(SymR->getSymbol());
+
+  for (ClusterBindings::iterator I = C->begin(), E = C->end(); I != E; ++I)
+    VisitBinding(I.getData());
+}
+
+void removeDeadBindingsWorker::VisitBinding(SVal V) {
+  // Is it a LazyCompoundVal?  All referenced regions are live as well.
+  if (Optional<nonloc::LazyCompoundVal> LCS =
+          V.getAs<nonloc::LazyCompoundVal>()) {
+
+    const RegionStoreManager::SValListTy &Vals = RM.getInterestingValues(*LCS);
+
+    for (RegionStoreManager::SValListTy::const_iterator I = Vals.begin(),
+                                                        E = Vals.end();
+         I != E; ++I)
+      VisitBinding(*I);
+
+    return;
+  }
+
+  // If V is a region, then add it to the worklist.
+  if (const MemRegion *R = V.getAsRegion()) {
+    AddToWorkList(R);
+    
+    // All regions captured by a block are also live.
+    if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
+      BlockDataRegion::referenced_vars_iterator I = BR->referenced_vars_begin(),
+                                                E = BR->referenced_vars_end();
+      for ( ; I != E; ++I)
+        AddToWorkList(I.getCapturedRegion());
+    }
+  }
+    
+
+  // Update the set of live symbols.
+  for (SymExpr::symbol_iterator SI = V.symbol_begin(), SE = V.symbol_end();
+       SI!=SE; ++SI)
+    SymReaper.markLive(*SI);
+}
+
+bool removeDeadBindingsWorker::UpdatePostponed() {
+  // See if any postponed SymbolicRegions are actually live now, after
+  // having done a scan.
+  bool changed = false;
+
+  for (SmallVectorImpl<const SymbolicRegion*>::iterator
+        I = Postponed.begin(), E = Postponed.end() ; I != E ; ++I) {
+    if (const SymbolicRegion *SR = *I) {
+      if (SymReaper.isLive(SR->getSymbol())) {
+        changed |= AddToWorkList(SR);
+        *I = NULL;
+      }
+    }
+  }
+
+  return changed;
+}
+
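+// removeDeadBindings() below is essentially a mark-and-sweep pass over the
+// store: clusters rooted at live variables, live symbols, non-static globals,
+// and the reaper's region roots are marked via the worklist, and every
+// unvisited cluster is swept.  SymbolicRegions whose symbols are not yet
+// known to be live are postponed; the worklist is re-run until
+// UpdatePostponed() finds no newly live postponed region, since a symbol may
+// only be marked live while visiting some other cluster's bindings.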
+StoreRef RegionStoreManager::removeDeadBindings(Store store,
+                                                const StackFrameContext *LCtx,
+                                                SymbolReaper& SymReaper) {
+  RegionBindingsRef B = getRegionBindings(store);
+  removeDeadBindingsWorker W(*this, StateMgr, B, SymReaper, LCtx);
+  W.GenerateClusters();
+
+  // Enqueue the region roots onto the worklist.
+  for (SymbolReaper::region_iterator I = SymReaper.region_begin(),
+       E = SymReaper.region_end(); I != E; ++I) {
+    W.AddToWorkList(*I);
+  }
+
+  do W.RunWorkList(); while (W.UpdatePostponed());
+
+  // We have now scanned the store, marking reachable regions and symbols
+  // as live.  We now remove all the regions that are dead from the store
+  // and report the symbols they reference to SymReaper as possibly dead.
+  for (RegionBindingsRef::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+    const MemRegion *Base = I.getKey();
+
+    // If the cluster has been visited, we know the region has been marked.
+    if (W.isVisited(Base))
+      continue;
+
+    // Remove the dead entry.
+    B = B.remove(Base);
+
+    if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(Base))
+      SymReaper.maybeDead(SymR->getSymbol());
+
+    // Report every symbol this dead binding references as possibly dead.
+    const ClusterBindings &Cluster = I.getData();
+    for (ClusterBindings::iterator CI = Cluster.begin(), CE = Cluster.end();
+         CI != CE; ++CI) {
+      SVal X = CI.getData();
+      SymExpr::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
+      for (; SI != SE; ++SI)
+        SymReaper.maybeDead(*SI);
+    }
+  }
+
+  return StoreRef(B.asStore(), *this);
+}
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+void RegionStoreManager::print(Store store, raw_ostream &OS,
+                               const char* nl, const char *sep) {
+  RegionBindingsRef B = getRegionBindings(store);
+  OS << "Store (direct and default bindings), "
+     << B.asStore()
+     << " :" << nl;
+  B.dump(OS, nl);
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
new file mode 100644
index 0000000..9d77a3e
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -0,0 +1,506 @@
+// SValBuilder.cpp - Basic class for all SValBuilder implementations -*- C++ -*-
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines SValBuilder, the base class for all (complete) SValBuilder
+//  implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Basic SVal creation.
+//===----------------------------------------------------------------------===//
+
+void SValBuilder::anchor() { }
+
+DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
+  if (Loc::isLocType(type))
+    return makeNull();
+
+  if (type->isIntegralOrEnumerationType())
+    return makeIntVal(0, type);
+
+  // FIXME: Handle floats.
+  // FIXME: Handle structs.
+  return UnknownVal();
+}
+
+NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+                                const llvm::APSInt& rhs, QualType type) {
+  // The Environment ensures we always get a persistent APSInt in
+  // BasicValueFactory, so we don't need to get the APSInt from
+  // BasicValueFactory again.
+  assert(lhs);
+  assert(!Loc::isLocType(type));
+  return nonloc::SymbolVal(SymMgr.getSymIntExpr(lhs, op, rhs, type));
+}
+
+NonLoc SValBuilder::makeNonLoc(const llvm::APSInt& lhs,
+                               BinaryOperator::Opcode op, const SymExpr *rhs,
+                               QualType type) {
+  assert(rhs);
+  assert(!Loc::isLocType(type));
+  return nonloc::SymbolVal(SymMgr.getIntSymExpr(lhs, op, rhs, type));
+}
+
+NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+                               const SymExpr *rhs, QualType type) {
+  assert(lhs && rhs);
+  assert(!Loc::isLocType(type));
+  return nonloc::SymbolVal(SymMgr.getSymSymExpr(lhs, op, rhs, type));
+}
+
+NonLoc SValBuilder::makeNonLoc(const SymExpr *operand,
+                               QualType fromTy, QualType toTy) {
+  assert(operand);
+  assert(!Loc::isLocType(toTy));
+  return nonloc::SymbolVal(SymMgr.getCastSymbol(operand, fromTy, toTy));
+}
+
+SVal SValBuilder::convertToArrayIndex(SVal val) {
+  if (val.isUnknownOrUndef())
+    return val;
+
+  // Common case: we have an appropriately sized integer.
+  if (Optional<nonloc::ConcreteInt> CI = val.getAs<nonloc::ConcreteInt>()) {
+    const llvm::APSInt& I = CI->getValue();
+    if (I.getBitWidth() == ArrayIndexWidth && I.isSigned())
+      return val;
+  }
+
+  return evalCastFromNonLoc(val.castAs<NonLoc>(), ArrayIndexTy);
+}
+
+nonloc::ConcreteInt SValBuilder::makeBoolVal(const CXXBoolLiteralExpr *boolean){
+  return makeTruthVal(boolean->getValue());
+}
+
+DefinedOrUnknownSVal 
+SValBuilder::getRegionValueSymbolVal(const TypedValueRegion* region) {
+  QualType T = region->getValueType();
+
+  if (!SymbolManager::canSymbolicate(T))
+    return UnknownVal();
+
+  SymbolRef sym = SymMgr.getRegionValueSymbol(region);
+
+  if (Loc::isLocType(T))
+    return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+  return nonloc::SymbolVal(sym);
+}
+
+DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *SymbolTag,
+                                                   const Expr *Ex,
+                                                   const LocationContext *LCtx,
+                                                   unsigned Count) {
+  QualType T = Ex->getType();
+
+  // Compute the type of the result. If the expression is not an R-value, the
+  // result should be a location.
+  QualType ExType = Ex->getType();
+  if (Ex->isGLValue())
+    T = LCtx->getAnalysisDeclContext()->getASTContext().getPointerType(ExType);
+
+  return conjureSymbolVal(SymbolTag, Ex, LCtx, T, Count);
+}
+
+DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag,
+                                                   const Expr *expr,
+                                                   const LocationContext *LCtx,
+                                                   QualType type,
+                                                   unsigned count) {
+  if (!SymbolManager::canSymbolicate(type))
+    return UnknownVal();
+
+  SymbolRef sym = SymMgr.conjureSymbol(expr, LCtx, type, count, symbolTag);
+
+  if (Loc::isLocType(type))
+    return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+  return nonloc::SymbolVal(sym);
+}
+
+
+DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const Stmt *stmt,
+                                                   const LocationContext *LCtx,
+                                                   QualType type,
+                                                   unsigned visitCount) {
+  if (!SymbolManager::canSymbolicate(type))
+    return UnknownVal();
+
+  SymbolRef sym = SymMgr.conjureSymbol(stmt, LCtx, type, visitCount);
+  
+  if (Loc::isLocType(type))
+    return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+  
+  return nonloc::SymbolVal(sym);
+}
+
+DefinedOrUnknownSVal
+SValBuilder::getConjuredHeapSymbolVal(const Expr *E,
+                                      const LocationContext *LCtx,
+                                      unsigned VisitCount) {
+  QualType T = E->getType();
+  assert(Loc::isLocType(T));
+  assert(SymbolManager::canSymbolicate(T));
+
+  SymbolRef sym = SymMgr.conjureSymbol(E, LCtx, T, VisitCount);
+  return loc::MemRegionVal(MemMgr.getSymbolicHeapRegion(sym));
+}
+
+DefinedSVal SValBuilder::getMetadataSymbolVal(const void *symbolTag,
+                                              const MemRegion *region,
+                                              const Expr *expr, QualType type,
+                                              unsigned count) {
+  assert(SymbolManager::canSymbolicate(type) && "Invalid metadata symbol type");
+
+  SymbolRef sym =
+      SymMgr.getMetadataSymbol(region, expr, type, count, symbolTag);
+
+  if (Loc::isLocType(type))
+    return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+  return nonloc::SymbolVal(sym);
+}
+
+DefinedOrUnknownSVal
+SValBuilder::getDerivedRegionValueSymbolVal(SymbolRef parentSymbol,
+                                             const TypedValueRegion *region) {
+  QualType T = region->getValueType();
+
+  if (!SymbolManager::canSymbolicate(T))
+    return UnknownVal();
+
+  SymbolRef sym = SymMgr.getDerivedSymbol(parentSymbol, region);
+
+  if (Loc::isLocType(T))
+    return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+  return nonloc::SymbolVal(sym);
+}
+
+DefinedSVal SValBuilder::getFunctionPointer(const FunctionDecl *func) {
+  return loc::MemRegionVal(MemMgr.getFunctionTextRegion(func));
+}
+
+DefinedSVal SValBuilder::getBlockPointer(const BlockDecl *block,
+                                         CanQualType locTy,
+                                         const LocationContext *locContext) {
+  const BlockTextRegion *BC =
+    MemMgr.getBlockTextRegion(block, locTy, locContext->getAnalysisDeclContext());
+  const BlockDataRegion *BD = MemMgr.getBlockDataRegion(BC, locContext);
+  return loc::MemRegionVal(BD);
+}
+
+/// Return a memory region for the 'this' object reference.
+loc::MemRegionVal SValBuilder::getCXXThis(const CXXMethodDecl *D,
+                                          const StackFrameContext *SFC) {
+  return loc::MemRegionVal(getRegionManager().
+                           getCXXThisRegion(D->getThisType(getContext()), SFC));
+}
+
+/// Return a memory region for the 'this' object reference.
+loc::MemRegionVal SValBuilder::getCXXThis(const CXXRecordDecl *D,
+                                          const StackFrameContext *SFC) {
+  const Type *T = D->getTypeForDecl();
+  QualType PT = getContext().getPointerType(QualType(T, 0));
+  return loc::MemRegionVal(getRegionManager().getCXXThisRegion(PT, SFC));
+}
+
+Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
+  E = E->IgnoreParens();
+
+  switch (E->getStmtClass()) {
+  // Handle expressions that we treat differently from the AST's constant
+  // evaluator.
+  case Stmt::AddrLabelExprClass:
+    return makeLoc(cast<AddrLabelExpr>(E));
+
+  case Stmt::CXXScalarValueInitExprClass:
+  case Stmt::ImplicitValueInitExprClass:
+    return makeZeroVal(E->getType());
+
+  case Stmt::ObjCStringLiteralClass: {
+    const ObjCStringLiteral *SL = cast<ObjCStringLiteral>(E);
+    return makeLoc(getRegionManager().getObjCStringRegion(SL));
+  }
+
+  case Stmt::StringLiteralClass: {
+    const StringLiteral *SL = cast<StringLiteral>(E);
+    return makeLoc(getRegionManager().getStringRegion(SL));
+  }
+
+  // Fast-path some expressions to avoid the overhead of going through the AST's
+  // constant evaluator
+  case Stmt::CharacterLiteralClass: {
+    const CharacterLiteral *C = cast<CharacterLiteral>(E);
+    return makeIntVal(C->getValue(), C->getType());
+  }
+
+  case Stmt::CXXBoolLiteralExprClass:
+    return makeBoolVal(cast<CXXBoolLiteralExpr>(E));
+
+  case Stmt::IntegerLiteralClass:
+    return makeIntVal(cast<IntegerLiteral>(E));
+
+  case Stmt::ObjCBoolLiteralExprClass:
+    return makeBoolVal(cast<ObjCBoolLiteralExpr>(E));
+
+  case Stmt::CXXNullPtrLiteralExprClass:
+    return makeNull();
+
+  // If we don't have a special case, fall back to the AST's constant evaluator.
+  default: {
+    // Don't try to come up with a value for materialized temporaries.
+    if (E->isGLValue())
+      return None;
+
+    ASTContext &Ctx = getContext();
+    llvm::APSInt Result;
+    if (E->EvaluateAsInt(Result, Ctx))
+      return makeIntVal(Result);
+
+    if (Loc::isLocType(E->getType()))
+      if (E->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull))
+        return makeNull();
+
+    return None;
+  }
+  }
+}
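+// For instance (illustrative), getConstantVal() maps "abc" to a Loc for its
+// StringRegion, 'x' and 42 to nonloc::ConcreteInt values, and nullptr to the
+// null pointer Loc; in the default case it returns None for glvalues and for
+// anything EvaluateAsInt() cannot fold.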
+
+//===----------------------------------------------------------------------===//
+
+SVal SValBuilder::makeSymExprValNN(ProgramStateRef State,
+                                   BinaryOperator::Opcode Op,
+                                   NonLoc LHS, NonLoc RHS,
+                                   QualType ResultTy) {
+  if (!State->isTainted(RHS) && !State->isTainted(LHS))
+    return UnknownVal();
+    
+  const SymExpr *symLHS = LHS.getAsSymExpr();
+  const SymExpr *symRHS = RHS.getAsSymExpr();
+  // TODO: When the Max Complexity is reached, we should conjure a symbol
+  // instead of generating an Unknown value and propagate the taint info to it.
+  const unsigned MaxComp = 10000; // 100000 28X
+
+  if (symLHS && symRHS &&
+      (symLHS->computeComplexity() + symRHS->computeComplexity()) <  MaxComp)
+    return makeNonLoc(symLHS, Op, symRHS, ResultTy);
+
+  if (symLHS && symLHS->computeComplexity() < MaxComp)
+    if (Optional<nonloc::ConcreteInt> rInt = RHS.getAs<nonloc::ConcreteInt>())
+      return makeNonLoc(symLHS, Op, rInt->getValue(), ResultTy);
+
+  if (symRHS && symRHS->computeComplexity() < MaxComp)
+    if (Optional<nonloc::ConcreteInt> lInt = LHS.getAs<nonloc::ConcreteInt>())
+      return makeNonLoc(lInt->getValue(), Op, symRHS, ResultTy);
+
+  return UnknownVal();
+}
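+// Note that makeSymExprValNN() only builds a symbolic binop when at least one
+// operand is tainted: e.g. for a tainted symbol $t, "$t + 1" is kept as the
+// symbolic value ($t + 1) rather than collapsing to Unknown, so the taint can
+// keep propagating (subject to the MaxComp complexity cap above).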
+
+
+SVal SValBuilder::evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
+                            SVal lhs, SVal rhs, QualType type) {
+
+  if (lhs.isUndef() || rhs.isUndef())
+    return UndefinedVal();
+
+  if (lhs.isUnknown() || rhs.isUnknown())
+    return UnknownVal();
+
+  if (Optional<Loc> LV = lhs.getAs<Loc>()) {
+    if (Optional<Loc> RV = rhs.getAs<Loc>())
+      return evalBinOpLL(state, op, *LV, *RV, type);
+
+    return evalBinOpLN(state, op, *LV, rhs.castAs<NonLoc>(), type);
+  }
+
+  if (Optional<Loc> RV = rhs.getAs<Loc>()) {
+    // Support pointer arithmetic where the addend is on the left
+    // and the pointer on the right.
+    assert(op == BO_Add);
+
+    // Commute the operands.
+    return evalBinOpLN(state, op, *RV, lhs.castAs<NonLoc>(), type);
+  }
+
+  return evalBinOpNN(state, op, lhs.castAs<NonLoc>(), rhs.castAs<NonLoc>(),
+                     type);
+}
+
+DefinedOrUnknownSVal SValBuilder::evalEQ(ProgramStateRef state,
+                                         DefinedOrUnknownSVal lhs,
+                                         DefinedOrUnknownSVal rhs) {
+  return evalBinOp(state, BO_EQ, lhs, rhs, Context.IntTy)
+      .castAs<DefinedOrUnknownSVal>();
+}
+
+/// Recursively check if the pointer types are equal modulo const, volatile,
+/// and restrict qualifiers. Also, assume that all types are similar to 'void'.
+/// Assumes the input types are canonical.
+static bool shouldBeModeledWithNoOp(ASTContext &Context, QualType ToTy,
+                                                         QualType FromTy) {
+  while (Context.UnwrapSimilarPointerTypes(ToTy, FromTy)) {
+    Qualifiers Quals1, Quals2;
+    ToTy = Context.getUnqualifiedArrayType(ToTy, Quals1);
+    FromTy = Context.getUnqualifiedArrayType(FromTy, Quals2);
+
+    // Make sure that the non-CVR qualifiers (e.g., address spaces) are
+    // identical.
+    Quals1.removeCVRQualifiers();
+    Quals2.removeCVRQualifiers();
+    if (Quals1 != Quals2)
+      return false;
+  }
+
+  // If we are casting to void, the 'From' value can be used to represent the
+  // 'To' value.
+  if (ToTy->isVoidType())
+    return true;
+
+  if (ToTy != FromTy)
+    return false;
+
+  return true;
+}
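+// For example (illustrative), with the pointer-wrapping done by the caller
+// below, a cast from 'int *' to 'const int *' or to 'void *' is modeled as a
+// no-op, whereas a cast from 'int *' to 'char *' is not, since the pointee
+// types still differ after stripping cv-qualifiers.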
+
+// FIXME: should rewrite according to the cast kind.
+SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
+  castTy = Context.getCanonicalType(castTy);
+  originalTy = Context.getCanonicalType(originalTy);
+  if (val.isUnknownOrUndef() || castTy == originalTy)
+    return val;
+
+  if (castTy->isBooleanType()) {
+    if (val.isUnknownOrUndef())
+      return val;
+    if (val.isConstant())
+      return makeTruthVal(!val.isZeroConstant(), castTy);
+    if (SymbolRef Sym = val.getAsSymbol()) {
+      BasicValueFactory &BVF = getBasicValueFactory();
+      // FIXME: If we had a state here, we could see if the symbol is known to
+      // be zero, but we don't.
+      return makeNonLoc(Sym, BO_NE, BVF.getValue(0, Sym->getType()), castTy);
+    }
+
+    assert(val.getAs<Loc>());
+    return makeTruthVal(true, castTy);
+  }
+
+  // For const casts and casts to void, just propagate the value.
+  if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType())
+    if (shouldBeModeledWithNoOp(Context, Context.getPointerType(castTy),
+                                         Context.getPointerType(originalTy)))
+      return val;
+  
+  // Check for casts from pointers to integers.
+  if (castTy->isIntegralOrEnumerationType() && Loc::isLocType(originalTy))
+    return evalCastFromLoc(val.castAs<Loc>(), castTy);
+
+  // Check for casts from integers to pointers.
+  if (Loc::isLocType(castTy) && originalTy->isIntegralOrEnumerationType()) {
+    if (Optional<nonloc::LocAsInteger> LV = val.getAs<nonloc::LocAsInteger>()) {
+      if (const MemRegion *R = LV->getLoc().getAsRegion()) {
+        StoreManager &storeMgr = StateMgr.getStoreManager();
+        R = storeMgr.castRegion(R, castTy);
+        return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
+      }
+      return LV->getLoc();
+    }
+    return dispatchCast(val, castTy);
+  }
+
+  // Just pass through function and block pointers.
+  if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) {
+    assert(Loc::isLocType(castTy));
+    return val;
+  }
+
+  // Check for casts from array type to another type.
+  if (originalTy->isArrayType()) {
+    // We will always decay to a pointer.
+    val = StateMgr.ArrayToPointer(val.castAs<Loc>());
+
+    // Are we casting from an array to a pointer?  If so just pass on
+    // the decayed value.
+    if (castTy->isPointerType() || castTy->isReferenceType())
+      return val;
+
+    // Are we casting from an array to an integer?  If so, cast the decayed
+    // pointer value to an integer.
+    assert(castTy->isIntegralOrEnumerationType());
+
+    // FIXME: Keep these here for now in case we decide soon that we
+    // need the original decayed type.
+    //    QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
+    //    QualType pointerTy = C.getPointerType(elemTy);
+    return evalCastFromLoc(val.castAs<Loc>(), castTy);
+  }
+
+  // Check for casts from a region to a specific type.
+  if (const MemRegion *R = val.getAsRegion()) {
+    // Handle other casts of locations to integers.
+    if (castTy->isIntegralOrEnumerationType())
+      return evalCastFromLoc(loc::MemRegionVal(R), castTy);
+
+    // FIXME: We should handle the case where we strip off view layers to get
+    //  to a desugared type.
+    if (!Loc::isLocType(castTy)) {
+      // FIXME: There can be gross cases where one casts the result of a function
+      // (that returns a pointer) to some other value that happens to fit
+      // within that pointer value.  We currently have no good way to
+      // model such operations.  When this happens, the underlying operation
+      // is that the caller is reasoning about bits.  Conceptually we are
+      // layering a "view" of a location on top of those bits.  Perhaps
+      // we need to be more lazy about mutual possible views, even on an
+      // SVal?  This may be necessary for bit-level reasoning as well.
+      return UnknownVal();
+    }
+
+    // We get a symbolic function pointer for a dereference of a function
+    // pointer, but it is of function type. Example:
+
+    //  struct FPRec {
+    //    void (*my_func)(int * x);
+    //  };
+    //
+    //  int bar(int x);
+    //
+    //  int f1_a(struct FPRec* foo) {
+    //    int x;
+    //    (*foo->my_func)(&x);
+    //    return bar(x)+1; // no-warning
+    //  }
+
+    assert(Loc::isLocType(originalTy) || originalTy->isFunctionType() ||
+           originalTy->isBlockPointerType() || castTy->isReferenceType());
+
+    StoreManager &storeMgr = StateMgr.getStoreManager();
+
+    // Delegate to the store manager to get the result of casting a region to
+    // a different type.  If the MemRegion* returned is NULL, this expression
+    // evaluates to UnknownVal.
+    R = storeMgr.castRegion(R, castTy);
+    return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
+  }
+
+  return dispatchCast(val, castTy);
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
new file mode 100644
index 0000000..6506915
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
@@ -0,0 +1,322 @@
+//= RValues.cpp - Abstract RValues for Path-Sens. Value Tracking -*- C++ -*-==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines SVal, Loc, and NonLoc, classes that represent
+//  abstract r-values for use with path-sensitive value tracking.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace ento;
+using llvm::APSInt;
+
+//===----------------------------------------------------------------------===//
+// Symbol iteration within an SVal.
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+bool SVal::hasConjuredSymbol() const {
+  if (Optional<nonloc::SymbolVal> SV = getAs<nonloc::SymbolVal>()) {
+    SymbolRef sym = SV->getSymbol();
+    if (isa<SymbolConjured>(sym))
+      return true;
+  }
+
+  if (Optional<loc::MemRegionVal> RV = getAs<loc::MemRegionVal>()) {
+    const MemRegion *R = RV->getRegion();
+    if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+      SymbolRef sym = SR->getSymbol();
+      if (isa<SymbolConjured>(sym))
+        return true;
+    }
+  }
+
+  return false;
+}
+
+const FunctionDecl *SVal::getAsFunctionDecl() const {
+  if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>()) {
+    const MemRegion* R = X->getRegion();
+    if (const FunctionTextRegion *CTR = R->getAs<FunctionTextRegion>())
+      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CTR->getDecl()))
+        return FD;
+  }
+
+  return 0;
+}
+
+/// \brief If this SVal is a location (subclasses Loc) and wraps a symbol,
+/// return that SymbolRef.  Otherwise return 0.
+///
+/// Implicit casts (e.g., void* -> char*) can turn a symbolic region into an
+/// element region; if that is the case, get the underlying region.
+/// When IncludeBaseRegions is set to true and the subregion is non-symbolic,
+/// the symbol of the first symbolic parent region is returned.
+SymbolRef SVal::getAsLocSymbol(bool IncludeBaseRegions) const {
+  // FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
+  if (Optional<nonloc::LocAsInteger> X = getAs<nonloc::LocAsInteger>())
+    return X->getLoc().getAsLocSymbol();
+
+  if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>()) {
+    const MemRegion *R = X->getRegion();
+    if (const SymbolicRegion *SymR = IncludeBaseRegions ?
+                                      R->getSymbolicBase() :
+                                      dyn_cast<SymbolicRegion>(R->StripCasts()))
+      return SymR->getSymbol();
+  }
+  return 0;
+}
+
+/// Get the symbol in the SVal or its base region.
+SymbolRef SVal::getLocSymbolInBase() const {
+  Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>();
+
+  if (!X)
+    return 0;
+
+  const MemRegion *R = X->getRegion();
+
+  while (const SubRegion *SR = dyn_cast<SubRegion>(R)) {
+    if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SR))
+      return SymR->getSymbol();
+    else
+      R = SR->getSuperRegion();
+  }
+
+  return 0;
+}
+
+// TODO: The next 3 functions have to be simplified.
+
+/// \brief If this SVal wraps a symbol, return that SymbolRef.
+/// Otherwise, return 0.
+///
+/// Casts are ignored during lookup.
+/// \param IncludeBaseRegion Controls whether the search should continue to
+/// the base regions if the region is not symbolic.
+SymbolRef SVal::getAsSymbol(bool IncludeBaseRegion) const {
+  // FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
+  if (Optional<nonloc::SymbolVal> X = getAs<nonloc::SymbolVal>())
+    return X->getSymbol();
+
+  return getAsLocSymbol(IncludeBaseRegion);
+}
+
+/// getAsSymbolicExpression - If this SVal wraps a symbolic expression, then
+///  return that expression.  Otherwise return NULL.
+const SymExpr *SVal::getAsSymbolicExpression() const {
+  if (Optional<nonloc::SymbolVal> X = getAs<nonloc::SymbolVal>())
+    return X->getSymbol();
+
+  return getAsSymbol();
+}
+
+const SymExpr* SVal::getAsSymExpr() const {
+  const SymExpr* Sym = getAsSymbol();
+  if (!Sym)
+    Sym = getAsSymbolicExpression();
+  return Sym;
+}
+
+const MemRegion *SVal::getAsRegion() const {
+  if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>())
+    return X->getRegion();
+
+  if (Optional<nonloc::LocAsInteger> X = getAs<nonloc::LocAsInteger>())
+    return X->getLoc().getAsRegion();
+
+  return 0;
+}
+
+const MemRegion *loc::MemRegionVal::stripCasts(bool StripBaseCasts) const {
+  const MemRegion *R = getRegion();
+  return R ?  R->StripCasts(StripBaseCasts) : NULL;
+}
+
+const void *nonloc::LazyCompoundVal::getStore() const {
+  return static_cast<const LazyCompoundValData*>(Data)->getStore();
+}
+
+const TypedValueRegion *nonloc::LazyCompoundVal::getRegion() const {
+  return static_cast<const LazyCompoundValData*>(Data)->getRegion();
+}
+
+//===----------------------------------------------------------------------===//
+// Other Iterators.
+//===----------------------------------------------------------------------===//
+
+nonloc::CompoundVal::iterator nonloc::CompoundVal::begin() const {
+  return getValue()->begin();
+}
+
+nonloc::CompoundVal::iterator nonloc::CompoundVal::end() const {
+  return getValue()->end();
+}
+
+//===----------------------------------------------------------------------===//
+// Useful predicates.
+//===----------------------------------------------------------------------===//
+
+bool SVal::isConstant() const {
+  return getAs<nonloc::ConcreteInt>() || getAs<loc::ConcreteInt>();
+}
+
+bool SVal::isConstant(int I) const {
+  if (Optional<loc::ConcreteInt> LV = getAs<loc::ConcreteInt>())
+    return LV->getValue() == I;
+  if (Optional<nonloc::ConcreteInt> NV = getAs<nonloc::ConcreteInt>())
+    return NV->getValue() == I;
+  return false;
+}
+
+bool SVal::isZeroConstant() const {
+  return isConstant(0);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Transfer function dispatch for Non-Locs.
+//===----------------------------------------------------------------------===//
+
+SVal nonloc::ConcreteInt::evalBinOp(SValBuilder &svalBuilder,
+                                    BinaryOperator::Opcode Op,
+                                    const nonloc::ConcreteInt& R) const {
+  const llvm::APSInt* X =
+    svalBuilder.getBasicValueFactory().evalAPSInt(Op, getValue(), R.getValue());
+
+  if (X)
+    return nonloc::ConcreteInt(*X);
+  else
+    return UndefinedVal();
+}
+
+nonloc::ConcreteInt
+nonloc::ConcreteInt::evalComplement(SValBuilder &svalBuilder) const {
+  return svalBuilder.makeIntVal(~getValue());
+}
+
+nonloc::ConcreteInt
+nonloc::ConcreteInt::evalMinus(SValBuilder &svalBuilder) const {
+  return svalBuilder.makeIntVal(-getValue());
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function dispatch for Locs.
+//===----------------------------------------------------------------------===//
+
+SVal loc::ConcreteInt::evalBinOp(BasicValueFactory& BasicVals,
+                                 BinaryOperator::Opcode Op,
+                                 const loc::ConcreteInt& R) const {
+
+  assert(BinaryOperator::isComparisonOp(Op) || Op == BO_Sub);
+
+  const llvm::APSInt *X = BasicVals.evalAPSInt(Op, getValue(), R.getValue());
+
+  if (X)
+    return nonloc::ConcreteInt(*X);
+  else
+    return UndefinedVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Pretty-Printing.
+//===----------------------------------------------------------------------===//
+
+void SVal::dump() const { dumpToStream(llvm::errs()); }
+
+void SVal::dumpToStream(raw_ostream &os) const {
+  switch (getBaseKind()) {
+    case UnknownKind:
+      os << "Unknown";
+      break;
+    case NonLocKind:
+      castAs<NonLoc>().dumpToStream(os);
+      break;
+    case LocKind:
+      castAs<Loc>().dumpToStream(os);
+      break;
+    case UndefinedKind:
+      os << "Undefined";
+      break;
+  }
+}
+
+void NonLoc::dumpToStream(raw_ostream &os) const {
+  switch (getSubKind()) {
+    case nonloc::ConcreteIntKind: {
+      const nonloc::ConcreteInt& C = castAs<nonloc::ConcreteInt>();
+      if (C.getValue().isUnsigned())
+        os << C.getValue().getZExtValue();
+      else
+        os << C.getValue().getSExtValue();
+      os << ' ' << (C.getValue().isUnsigned() ? 'U' : 'S')
+         << C.getValue().getBitWidth() << 'b';
+      break;
+    }
+    case nonloc::SymbolValKind: {
+      os << castAs<nonloc::SymbolVal>().getSymbol();
+      break;
+    }
+    case nonloc::LocAsIntegerKind: {
+      const nonloc::LocAsInteger& C = castAs<nonloc::LocAsInteger>();
+      os << C.getLoc() << " [as " << C.getNumBits() << " bit integer]";
+      break;
+    }
+    case nonloc::CompoundValKind: {
+      const nonloc::CompoundVal& C = castAs<nonloc::CompoundVal>();
+      os << "compoundVal{";
+      bool first = true;
+      for (nonloc::CompoundVal::iterator I=C.begin(), E=C.end(); I!=E; ++I) {
+        if (first) {
+          os << ' '; first = false;
+        }
+        else
+          os << ", ";
+
+        (*I).dumpToStream(os);
+      }
+      os << "}";
+      break;
+    }
+    case nonloc::LazyCompoundValKind: {
+      const nonloc::LazyCompoundVal &C = castAs<nonloc::LazyCompoundVal>();
+      os << "lazyCompoundVal{" << const_cast<void *>(C.getStore())
+         << ',' << C.getRegion()
+         << '}';
+      break;
+    }
+    default:
+      assert(false && "Pretty-printing not implemented for this NonLoc.");
+      break;
+  }
+}
+
+void Loc::dumpToStream(raw_ostream &os) const {
+  switch (getSubKind()) {
+    case loc::ConcreteIntKind:
+      os << castAs<loc::ConcreteInt>().getValue().getZExtValue() << " (Loc)";
+      break;
+    case loc::GotoLabelKind:
+      os << "&&" << castAs<loc::GotoLabel>().getLabel()->getName();
+      break;
+    case loc::MemRegionKind:
+      os << '&' << castAs<loc::MemRegionVal>().getRegion()->getString();
+      break;
+    default:
+      llvm_unreachable("Pretty-printing not implemented for this Loc.");
+  }
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
new file mode 100644
index 0000000..a06268d
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -0,0 +1,298 @@
+//== SimpleConstraintManager.cpp --------------------------------*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines SimpleConstraintManager, a class that holds code shared
+//  between BasicConstraintManager and RangeConstraintManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SimpleConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+namespace clang {
+
+namespace ento {
+
+SimpleConstraintManager::~SimpleConstraintManager() {}
+
+bool SimpleConstraintManager::canReasonAbout(SVal X) const {
+  Optional<nonloc::SymbolVal> SymVal = X.getAs<nonloc::SymbolVal>();
+  if (SymVal && SymVal->isExpression()) {
+    const SymExpr *SE = SymVal->getSymbol();
+
+    if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) {
+      switch (SIE->getOpcode()) {
+          // We don't reason yet about bitwise-constraints on symbolic values.
+        case BO_And:
+        case BO_Or:
+        case BO_Xor:
+          return false;
+        // We don't reason yet about these arithmetic constraints on
+        // symbolic values.
+        case BO_Mul:
+        case BO_Div:
+        case BO_Rem:
+        case BO_Shl:
+        case BO_Shr:
+          return false;
+        // All other cases.
+        default:
+          return true;
+      }
+    }
+
+    if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(SE)) {
+      if (BinaryOperator::isComparisonOp(SSE->getOpcode())) {
+        // We handle Loc <> Loc comparisons, but not (yet) NonLoc <> NonLoc.
+        if (Loc::isLocType(SSE->getLHS()->getType())) {
+          assert(Loc::isLocType(SSE->getRHS()->getType()));
+          return true;
+        }
+      }
+    }
+
+    return false;
+  }
+
+  return true;
+}
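+// Examples (illustrative): a comparison such as "$x > 4" is considered
+// reasonable and is later simplified via assumeSymRel(); bitwise and
+// multiplicative forms such as "$x & 3" or "$x * 2" are not, so assumeAux()
+// falls back to constraining the whole expression as one opaque symbol
+// (assumeAuxForSymbol).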
+
+ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state,
+                                               DefinedSVal Cond,
+                                               bool Assumption) {
+  if (Optional<NonLoc> NV = Cond.getAs<NonLoc>())
+    return assume(state, *NV, Assumption);
+  return assume(state, Cond.castAs<Loc>(), Assumption);
+}
+
+ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state, Loc cond,
+                                               bool assumption) {
+  state = assumeAux(state, cond, assumption);
+  if (NotifyAssumeClients && SU)
+    return SU->processAssume(state, cond, assumption);
+  return state;
+}
+
+ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
+                                                  Loc Cond, bool Assumption) {
+  switch (Cond.getSubKind()) {
+  default:
+    assert (false && "'Assume' not implemented for this Loc.");
+    return state;
+
+  case loc::MemRegionKind: {
+    // FIXME: Should this go into the storemanager?
+    const MemRegion *R = Cond.castAs<loc::MemRegionVal>().getRegion();
+
+    // FIXME: now we only find the first symbolic region.
+    if (const SymbolicRegion *SymR = R->getSymbolicBase()) {
+      const llvm::APSInt &zero = getBasicVals().getZeroWithPtrWidth();
+      if (Assumption)
+        return assumeSymNE(state, SymR->getSymbol(), zero, zero);
+      else
+        return assumeSymEQ(state, SymR->getSymbol(), zero, zero);
+    }
+
+    // FALL-THROUGH.
+  }
+
+  case loc::GotoLabelKind:
+    return Assumption ? state : NULL;
+
+  case loc::ConcreteIntKind: {
+    bool b = Cond.castAs<loc::ConcreteInt>().getValue() != 0;
+    bool isFeasible = b ? Assumption : !Assumption;
+    return isFeasible ? state : NULL;
+  }
+  } // end switch
+}
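+// For example (illustrative), branching on "if (p)" with a symbolic pointer
+// 'p' hits the MemRegionKind case above: the true branch constrains the
+// region's base symbol to be non-null and the false branch constrains it to
+// be null, while an infeasible concrete pointer condition yields a null state.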
+
+ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state,
+                                               NonLoc cond,
+                                               bool assumption) {
+  state = assumeAux(state, cond, assumption);
+  if (NotifyAssumeClients && SU)
+    return SU->processAssume(state, cond, assumption);
+  return state;
+}
+
+
+ProgramStateRef
+SimpleConstraintManager::assumeAuxForSymbol(ProgramStateRef State,
+                                            SymbolRef Sym, bool Assumption) {
+  BasicValueFactory &BVF = getBasicVals();
+  QualType T = Sym->getType();
+
+  // None of the constraint solvers currently support non-integer types.
+  if (!T->isIntegralOrEnumerationType())
+    return State;
+
+  const llvm::APSInt &zero = BVF.getValue(0, T);
+  if (Assumption)
+    return assumeSymNE(State, Sym, zero, zero);
+  else
+    return assumeSymEQ(State, Sym, zero, zero);
+}
+
+ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
+                                                  NonLoc Cond,
+                                                  bool Assumption) {
+
+  // We cannot reason about SymSymExprs, and can only reason about some
+  // SymIntExprs.
+  if (!canReasonAbout(Cond)) {
+    // Just add the constraint to the expression without trying to simplify.
+    SymbolRef sym = Cond.getAsSymExpr();
+    return assumeAuxForSymbol(state, sym, Assumption);
+  }
+
+  switch (Cond.getSubKind()) {
+  default:
+    llvm_unreachable("'Assume' not implemented for this NonLoc");
+
+  case nonloc::SymbolValKind: {
+    nonloc::SymbolVal SV = Cond.castAs<nonloc::SymbolVal>();
+    SymbolRef sym = SV.getSymbol();
+    assert(sym);
+
+    // Handle SymbolData.
+    if (!SV.isExpression()) {
+      return assumeAuxForSymbol(state, sym, Assumption);
+
+    // Handle symbolic expression.
+    } else if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(sym)) {
+      // We can only simplify expressions whose RHS is an integer.
+
+      BinaryOperator::Opcode op = SE->getOpcode();
+      if (BinaryOperator::isComparisonOp(op)) {
+        if (!Assumption)
+          op = BinaryOperator::negateComparisonOp(op);
+
+        return assumeSymRel(state, SE->getLHS(), op, SE->getRHS());
+      }
+
+    } else if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(sym)) {
+      // Translate "a != b" to "(b - a) != 0".
+      // We invert the order of the operands as a heuristic for how loop
+      // conditions are usually written ("begin != end") as compared to length
+      // calculations ("end - begin"). The more correct thing to do would be to
+      // canonicalize "a - b" and "b - a", which would allow us to treat
+      // "a != b" and "b != a" the same.
+      SymbolManager &SymMgr = getSymbolManager();
+      BinaryOperator::Opcode Op = SSE->getOpcode();
+      assert(BinaryOperator::isComparisonOp(Op));
+
+      // For now, we only support comparing pointers.
+      assert(Loc::isLocType(SSE->getLHS()->getType()));
+      assert(Loc::isLocType(SSE->getRHS()->getType()));
+      QualType DiffTy = SymMgr.getContext().getPointerDiffType();
+      SymbolRef Subtraction = SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub,
+                                                   SSE->getLHS(), DiffTy);
+
+      const llvm::APSInt &Zero = getBasicVals().getValue(0, DiffTy);
+      Op = BinaryOperator::reverseComparisonOp(Op);
+      if (!Assumption)
+        Op = BinaryOperator::negateComparisonOp(Op);
+      return assumeSymRel(state, Subtraction, Op, Zero);
+    }
+
+    // If we get here, there's nothing else we can do but treat the symbol as
+    // opaque.
+    return assumeAuxForSymbol(state, sym, Assumption);
+  }
+
+  case nonloc::ConcreteIntKind: {
+    bool b = Cond.castAs<nonloc::ConcreteInt>().getValue() != 0;
+    bool isFeasible = b ? Assumption : !Assumption;
+    return isFeasible ? state : NULL;
+  }
+
+  case nonloc::LocAsIntegerKind:
+    return assumeAux(state, Cond.castAs<nonloc::LocAsInteger>().getLoc(),
+                     Assumption);
+  } // end switch
+}
+
+static void computeAdjustment(SymbolRef &Sym, llvm::APSInt &Adjustment) {
+  // Is it a "($sym+constant1)" expression?
+  if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(Sym)) {
+    BinaryOperator::Opcode Op = SE->getOpcode();
+    if (Op == BO_Add || Op == BO_Sub) {
+      Sym = SE->getLHS();
+      Adjustment = APSIntType(Adjustment).convert(SE->getRHS());
+
+      // Don't forget to negate the adjustment if it's being subtracted.
+      // This should happen /after/ promotion, in case the value being
+      // subtracted is, say, CHAR_MIN, and the promoted type is 'int'.
+      if (Op == BO_Sub)
+        Adjustment = -Adjustment;
+    }
+  }
+}
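+// For example, for the expression "$x + 3", computeAdjustment() rewrites Sym
+// to "$x" and sets Adjustment to 3 (and to -3 for "$x - 3"), allowing
+// assumeSymRel() below to slide the constraint range by the adjustment rather
+// than constraining the compound expression directly.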
+
+ProgramStateRef SimpleConstraintManager::assumeSymRel(ProgramStateRef state,
+                                                     const SymExpr *LHS,
+                                                     BinaryOperator::Opcode op,
+                                                     const llvm::APSInt& Int) {
+  assert(BinaryOperator::isComparisonOp(op) &&
+         "Non-comparison ops should be rewritten as comparisons to zero.");
+
+  // Get the type used for calculating wraparound.
+  BasicValueFactory &BVF = getBasicVals();
+  APSIntType WraparoundType = BVF.getAPSIntType(LHS->getType());
+
+  // We only handle simple comparisons of the form "$sym == constant"
+  // or "($sym+constant1) == constant2".
+  // The adjustment is "constant1" in the above expression. It's used to
+  // "slide" the solution range around for modular arithmetic. For example,
+  // x < 4 has the solution [0, 3]. x+2 < 4 has the solution [0-2, 3-2], which
+  // in modular arithmetic is [0, 1] U [UINT_MAX-1, UINT_MAX]. It's up to
+  // the subclasses of SimpleConstraintManager to handle the adjustment.
+  SymbolRef Sym = LHS;
+  llvm::APSInt Adjustment = WraparoundType.getZeroValue();
+  computeAdjustment(Sym, Adjustment);
+
+  // Convert the right-hand side integer as necessary.
+  APSIntType ComparisonType = std::max(WraparoundType, APSIntType(Int));
+  llvm::APSInt ConvertedInt = ComparisonType.convert(Int);
+
+  // Prefer unsigned comparisons.
+  if (ComparisonType.getBitWidth() == WraparoundType.getBitWidth() &&
+      ComparisonType.isUnsigned() && !WraparoundType.isUnsigned())
+    Adjustment.setIsSigned(false);
+
+  switch (op) {
+  default:
+    llvm_unreachable("invalid operation not caught by assertion above");
+
+  case BO_EQ:
+    return assumeSymEQ(state, Sym, ConvertedInt, Adjustment);
+
+  case BO_NE:
+    return assumeSymNE(state, Sym, ConvertedInt, Adjustment);
+
+  case BO_GT:
+    return assumeSymGT(state, Sym, ConvertedInt, Adjustment);
+
+  case BO_GE:
+    return assumeSymGE(state, Sym, ConvertedInt, Adjustment);
+
+  case BO_LT:
+    return assumeSymLT(state, Sym, ConvertedInt, Adjustment);
+
+  case BO_LE:
+    return assumeSymLE(state, Sym, ConvertedInt, Adjustment);
+  } // end switch
+}
+
+} // end of namespace ento
+
+} // end of namespace clang
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h b/safecode/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
new file mode 100644
index 0000000..10ddef1
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
@@ -0,0 +1,106 @@
+//== SimpleConstraintManager.h ----------------------------------*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  Code shared between BasicConstraintManager and RangeConstraintManager.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_SIMPLE_CONSTRAINT_MANAGER_H
+#define LLVM_CLANG_GR_SIMPLE_CONSTRAINT_MANAGER_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+namespace clang {
+
+namespace ento {
+
+class SimpleConstraintManager : public ConstraintManager {
+  SubEngine *SU;
+  SValBuilder &SVB;
+public:
+  SimpleConstraintManager(SubEngine *subengine, SValBuilder &SB)
+    : SU(subengine), SVB(SB) {}
+  virtual ~SimpleConstraintManager();
+
+  //===------------------------------------------------------------------===//
+  // Common implementation for the interface provided by ConstraintManager.
+  //===------------------------------------------------------------------===//
+
+  ProgramStateRef assume(ProgramStateRef state, DefinedSVal Cond,
+                        bool Assumption);
+
+  ProgramStateRef assume(ProgramStateRef state, Loc Cond, bool Assumption);
+
+  ProgramStateRef assume(ProgramStateRef state, NonLoc Cond, bool Assumption);
+
+  ProgramStateRef assumeSymRel(ProgramStateRef state,
+                              const SymExpr *LHS,
+                              BinaryOperator::Opcode op,
+                              const llvm::APSInt& Int);
+
+protected:
+
+  //===------------------------------------------------------------------===//
+  // Interface that subclasses must implement.
+  //===------------------------------------------------------------------===//
+
+  // Each of these is of the form "$sym+Adj <> V", where "<>" is the comparison
+  // operation for the method being invoked.
+  virtual ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym,
+                                     const llvm::APSInt& V,
+                                     const llvm::APSInt& Adjustment) = 0;
+
+  virtual ProgramStateRef assumeSymEQ(ProgramStateRef state, SymbolRef sym,
+                                     const llvm::APSInt& V,
+                                     const llvm::APSInt& Adjustment) = 0;
+
+  virtual ProgramStateRef assumeSymLT(ProgramStateRef state, SymbolRef sym,
+                                     const llvm::APSInt& V,
+                                     const llvm::APSInt& Adjustment) = 0;
+
+  virtual ProgramStateRef assumeSymGT(ProgramStateRef state, SymbolRef sym,
+                                     const llvm::APSInt& V,
+                                     const llvm::APSInt& Adjustment) = 0;
+
+  virtual ProgramStateRef assumeSymLE(ProgramStateRef state, SymbolRef sym,
+                                     const llvm::APSInt& V,
+                                     const llvm::APSInt& Adjustment) = 0;
+
+  virtual ProgramStateRef assumeSymGE(ProgramStateRef state, SymbolRef sym,
+                                     const llvm::APSInt& V,
+                                     const llvm::APSInt& Adjustment) = 0;
+
+  //===------------------------------------------------------------------===//
+  // Internal implementation.
+  //===------------------------------------------------------------------===//
+
+  BasicValueFactory &getBasicVals() const { return SVB.getBasicValueFactory(); }
+  SymbolManager &getSymbolManager() const { return SVB.getSymbolManager(); }
+
+  bool canReasonAbout(SVal X) const;
+
+  ProgramStateRef assumeAux(ProgramStateRef state,
+                                Loc Cond,
+                                bool Assumption);
+
+  ProgramStateRef assumeAux(ProgramStateRef state,
+                                NonLoc Cond,
+                                bool Assumption);
+
+  ProgramStateRef assumeAuxForSymbol(ProgramStateRef State,
+                                         SymbolRef Sym,
+                                         bool Assumption);
+};
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
new file mode 100644
index 0000000..ee627f2
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -0,0 +1,941 @@
+// SimpleSValBuilder.cpp - A basic SValBuilder -----------------------*- C++ -*-
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines SimpleSValBuilder, a basic implementation of SValBuilder.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class SimpleSValBuilder : public SValBuilder {
+protected:
+  virtual SVal dispatchCast(SVal val, QualType castTy);
+  virtual SVal evalCastFromNonLoc(NonLoc val, QualType castTy);
+  virtual SVal evalCastFromLoc(Loc val, QualType castTy);
+
+public:
+  SimpleSValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
+                    ProgramStateManager &stateMgr)
+                    : SValBuilder(alloc, context, stateMgr) {}
+  virtual ~SimpleSValBuilder() {}
+
+  virtual SVal evalMinus(NonLoc val);
+  virtual SVal evalComplement(NonLoc val);
+  virtual SVal evalBinOpNN(ProgramStateRef state, BinaryOperator::Opcode op,
+                           NonLoc lhs, NonLoc rhs, QualType resultTy);
+  virtual SVal evalBinOpLL(ProgramStateRef state, BinaryOperator::Opcode op,
+                           Loc lhs, Loc rhs, QualType resultTy);
+  virtual SVal evalBinOpLN(ProgramStateRef state, BinaryOperator::Opcode op,
+                           Loc lhs, NonLoc rhs, QualType resultTy);
+
+  /// getKnownValue - evaluates a given SVal. If the SVal has only one possible
+  ///  (integer) value, that value is returned. Otherwise, returns NULL.
+  virtual const llvm::APSInt *getKnownValue(ProgramStateRef state, SVal V);
+  
+  SVal MakeSymIntVal(const SymExpr *LHS, BinaryOperator::Opcode op,
+                     const llvm::APSInt &RHS, QualType resultTy);
+};
+} // end anonymous namespace
+
+SValBuilder *ento::createSimpleSValBuilder(llvm::BumpPtrAllocator &alloc,
+                                           ASTContext &context,
+                                           ProgramStateManager &stateMgr) {
+  return new SimpleSValBuilder(alloc, context, stateMgr);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for Casts.
+//===----------------------------------------------------------------------===//
+
+SVal SimpleSValBuilder::dispatchCast(SVal Val, QualType CastTy) {
+  assert(Val.getAs<Loc>() || Val.getAs<NonLoc>());
+  return Val.getAs<Loc>() ? evalCastFromLoc(Val.castAs<Loc>(), CastTy)
+                           : evalCastFromNonLoc(Val.castAs<NonLoc>(), CastTy);
+}
+
+SVal SimpleSValBuilder::evalCastFromNonLoc(NonLoc val, QualType castTy) {
+
+  bool isLocType = Loc::isLocType(castTy);
+
+  if (Optional<nonloc::LocAsInteger> LI = val.getAs<nonloc::LocAsInteger>()) {
+    if (isLocType)
+      return LI->getLoc();
+
+    // FIXME: Correctly support promotions/truncations.
+    unsigned castSize = Context.getTypeSize(castTy);
+    if (castSize == LI->getNumBits())
+      return val;
+    return makeLocAsInteger(LI->getLoc(), castSize);
+  }
+
+  if (const SymExpr *se = val.getAsSymbolicExpression()) {
+    QualType T = Context.getCanonicalType(se->getType());
+    // If types are the same or both are integers, ignore the cast.
+    // FIXME: Remove this hack when we support symbolic truncation/extension.
+    // HACK: If both castTy and T are integers, ignore the cast.  This is
+    // not a permanent solution.  Eventually we want to precisely handle
+    // extension/truncation of symbolic integers.  This prevents us from losing
+    // precision when we assign 'x = y' and 'y' is symbolic and x and y are
+    // different integer types.
+    if (haveSameType(T, castTy))
+      return val;
+
+    if (!isLocType)
+      return makeNonLoc(se, T, castTy);
+    return UnknownVal();
+  }
+
+  // If value is a non integer constant, produce unknown.
+  if (!val.getAs<nonloc::ConcreteInt>())
+    return UnknownVal();
+
+  // Handle casts to a boolean type.
+  if (castTy->isBooleanType()) {
+    bool b = val.castAs<nonloc::ConcreteInt>().getValue().getBoolValue();
+    return makeTruthVal(b, castTy);
+  }
+
+  // Only handle casts from integers to integers - if val is an integer constant
+  // being cast to a non integer type, produce unknown.
+  if (!isLocType && !castTy->isIntegralOrEnumerationType())
+    return UnknownVal();
+
+  llvm::APSInt i = val.castAs<nonloc::ConcreteInt>().getValue();
+  BasicVals.getAPSIntType(castTy).apply(i);
+
+  if (isLocType)
+    return makeIntLocVal(i);
+  else
+    return makeIntVal(i);
+}
+
+SVal SimpleSValBuilder::evalCastFromLoc(Loc val, QualType castTy) {
+
+  // Casts from pointers -> pointers, just return the lval.
+  //
+  // Casts from pointers -> references, just return the lval.  These
+  //   can be introduced by the frontend for corner cases, e.g
+  //   casting from va_list* to __builtin_va_list&.
+  //
+  if (Loc::isLocType(castTy) || castTy->isReferenceType())
+    return val;
+
+  // FIXME: Handle transparent unions where a value can be "transparently"
+  //  lifted into a union type.
+  if (castTy->isUnionType())
+    return UnknownVal();
+
+  if (castTy->isIntegralOrEnumerationType()) {
+    unsigned BitWidth = Context.getTypeSize(castTy);
+
+    if (!val.getAs<loc::ConcreteInt>())
+      return makeLocAsInteger(val, BitWidth);
+
+    llvm::APSInt i = val.castAs<loc::ConcreteInt>().getValue();
+    BasicVals.getAPSIntType(castTy).apply(i);
+    return makeIntVal(i);
+  }
+
+  // All other cases: return 'UnknownVal'.  This includes casting pointers
+  // to floats, which is probably badness in itself, but this is a good
+  // intermediate solution until we do something better.
+  return UnknownVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for unary operators.
+//===----------------------------------------------------------------------===//
+
+SVal SimpleSValBuilder::evalMinus(NonLoc val) {
+  switch (val.getSubKind()) {
+  case nonloc::ConcreteIntKind:
+    return val.castAs<nonloc::ConcreteInt>().evalMinus(*this);
+  default:
+    return UnknownVal();
+  }
+}
+
+SVal SimpleSValBuilder::evalComplement(NonLoc X) {
+  switch (X.getSubKind()) {
+  case nonloc::ConcreteIntKind:
+    return X.castAs<nonloc::ConcreteInt>().evalComplement(*this);
+  default:
+    return UnknownVal();
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for binary operators.
+//===----------------------------------------------------------------------===//
+
+SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS,
+                                    BinaryOperator::Opcode op,
+                                    const llvm::APSInt &RHS,
+                                    QualType resultTy) {
+  bool isIdempotent = false;
+
+  // Check for a few special cases with known reductions first.
+  switch (op) {
+  default:
+    // We can't reduce this case; just treat it normally.
+    break;
+  case BO_Mul:
+    // a*0 and a*1
+    if (RHS == 0)
+      return makeIntVal(0, resultTy);
+    else if (RHS == 1)
+      isIdempotent = true;
+    break;
+  case BO_Div:
+    // a/0 and a/1
+    if (RHS == 0)
+      // This is also handled elsewhere.
+      return UndefinedVal();
+    else if (RHS == 1)
+      isIdempotent = true;
+    break;
+  case BO_Rem:
+    // a%0 and a%1
+    if (RHS == 0)
+      // This is also handled elsewhere.
+      return UndefinedVal();
+    else if (RHS == 1)
+      return makeIntVal(0, resultTy);
+    break;
+  case BO_Add:
+  case BO_Sub:
+  case BO_Shl:
+  case BO_Shr:
+  case BO_Xor:
+    // a+0, a-0, a<<0, a>>0, a^0
+    if (RHS == 0)
+      isIdempotent = true;
+    break;
+  case BO_And:
+    // a&0 and a&(~0)
+    if (RHS == 0)
+      return makeIntVal(0, resultTy);
+    else if (RHS.isAllOnesValue())
+      isIdempotent = true;
+    break;
+  case BO_Or:
+    // a|0 and a|(~0)
+    if (RHS == 0)
+      isIdempotent = true;
+    else if (RHS.isAllOnesValue()) {
+      const llvm::APSInt &Result = BasicVals.Convert(resultTy, RHS);
+      return nonloc::ConcreteInt(Result);
+    }
+    break;
+  }
+
+  // Idempotent ops (like a*1) can still change the type of an expression.
+  // Wrap the LHS up in a NonLoc again and let evalCastFromNonLoc do the
+  // dirty work.
+  if (isIdempotent)
+      return evalCastFromNonLoc(nonloc::SymbolVal(LHS), resultTy);
+
+  // If we reach this point, the expression cannot be simplified.
+  // Make a SymbolVal for the entire expression, after converting the RHS.
+  const llvm::APSInt *ConvertedRHS = &RHS;
+  if (BinaryOperator::isComparisonOp(op)) {
+    // We're looking for a type big enough to compare the symbolic value
+    // with the given constant.
+    // FIXME: This is an approximation of Sema::UsualArithmeticConversions.
+    ASTContext &Ctx = getContext();
+    QualType SymbolType = LHS->getType();
+    uint64_t ValWidth = RHS.getBitWidth();
+    uint64_t TypeWidth = Ctx.getTypeSize(SymbolType);
+
+    if (ValWidth < TypeWidth) {
+      // If the value is too small, extend it.
+      ConvertedRHS = &BasicVals.Convert(SymbolType, RHS);
+    } else if (ValWidth == TypeWidth) {
+      // If the value is signed but the symbol is unsigned, do the comparison
+      // in unsigned space. [C99 6.3.1.8]
+      // (For the opposite case, the value is already unsigned.)
+      if (RHS.isSigned() && !SymbolType->isSignedIntegerOrEnumerationType())
+        ConvertedRHS = &BasicVals.Convert(SymbolType, RHS);
+    }
+  } else
+    ConvertedRHS = &BasicVals.Convert(resultTy, RHS);
+
+  return makeNonLoc(LHS, op, *ConvertedRHS, resultTy);
+}
+
+SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
+                                  BinaryOperator::Opcode op,
+                                  NonLoc lhs, NonLoc rhs,
+                                  QualType resultTy)  {
+  NonLoc InputLHS = lhs;
+  NonLoc InputRHS = rhs;
+
+  // Handle trivial case where left-side and right-side are the same.
+  if (lhs == rhs)
+    switch (op) {
+      default:
+        break;
+      case BO_EQ:
+      case BO_LE:
+      case BO_GE:
+        return makeTruthVal(true, resultTy);
+      case BO_LT:
+      case BO_GT:
+      case BO_NE:
+        return makeTruthVal(false, resultTy);
+      case BO_Xor:
+      case BO_Sub:
+        if (resultTy->isIntegralOrEnumerationType())
+          return makeIntVal(0, resultTy);
+        return evalCastFromNonLoc(makeIntVal(0, /*Unsigned=*/false), resultTy);
+      case BO_Or:
+      case BO_And:
+        return evalCastFromNonLoc(lhs, resultTy);
+    }
+
+  while (1) {
+    switch (lhs.getSubKind()) {
+    default:
+      return makeSymExprValNN(state, op, lhs, rhs, resultTy);
+    case nonloc::LocAsIntegerKind: {
+      Loc lhsL = lhs.castAs<nonloc::LocAsInteger>().getLoc();
+      switch (rhs.getSubKind()) {
+        case nonloc::LocAsIntegerKind:
+          return evalBinOpLL(state, op, lhsL,
+                             rhs.castAs<nonloc::LocAsInteger>().getLoc(),
+                             resultTy);
+        case nonloc::ConcreteIntKind: {
+          // Transform the integer into a location and compare.
+          llvm::APSInt i = rhs.castAs<nonloc::ConcreteInt>().getValue();
+          BasicVals.getAPSIntType(Context.VoidPtrTy).apply(i);
+          return evalBinOpLL(state, op, lhsL, makeLoc(i), resultTy);
+        }
+        default:
+          switch (op) {
+            case BO_EQ:
+              return makeTruthVal(false, resultTy);
+            case BO_NE:
+              return makeTruthVal(true, resultTy);
+            default:
+              // This case also handles pointer arithmetic.
+              return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
+          }
+      }
+    }
+    case nonloc::ConcreteIntKind: {
+      llvm::APSInt LHSValue = lhs.castAs<nonloc::ConcreteInt>().getValue();
+
+      // If we're dealing with two known constants, just perform the operation.
+      if (const llvm::APSInt *KnownRHSValue = getKnownValue(state, rhs)) {
+        llvm::APSInt RHSValue = *KnownRHSValue;
+        if (BinaryOperator::isComparisonOp(op)) {
+          // We're looking for a type big enough to compare the two values.
+          // FIXME: This is not correct. char + short will result in a promotion
+          // to int. Unfortunately we have lost types by this point.
+          APSIntType CompareType = std::max(APSIntType(LHSValue),
+                                            APSIntType(RHSValue));
+          CompareType.apply(LHSValue);
+          CompareType.apply(RHSValue);
+        } else if (!BinaryOperator::isShiftOp(op)) {
+          APSIntType IntType = BasicVals.getAPSIntType(resultTy);
+          IntType.apply(LHSValue);
+          IntType.apply(RHSValue);
+        }
+
+        const llvm::APSInt *Result =
+          BasicVals.evalAPSInt(op, LHSValue, RHSValue);
+        if (!Result)
+          return UndefinedVal();
+
+        return nonloc::ConcreteInt(*Result);
+      }
+
+      // Swap the left and right sides and flip the operator if doing so
+      // allows us to better reason about the expression (this is a form
+      // of expression canonicalization).
+      // While we're at it, catch some special cases for non-commutative ops.
+      switch (op) {
+      case BO_LT:
+      case BO_GT:
+      case BO_LE:
+      case BO_GE:
+        op = BinaryOperator::reverseComparisonOp(op);
+        // FALL-THROUGH
+      case BO_EQ:
+      case BO_NE:
+      case BO_Add:
+      case BO_Mul:
+      case BO_And:
+      case BO_Xor:
+      case BO_Or:
+        std::swap(lhs, rhs);
+        continue;
+      case BO_Shr:
+        // (~0)>>a
+        if (LHSValue.isAllOnesValue() && LHSValue.isSigned())
+          return evalCastFromNonLoc(lhs, resultTy);
+        // FALL-THROUGH
+      case BO_Shl:
+        // 0<<a and 0>>a
+        if (LHSValue == 0)
+          return evalCastFromNonLoc(lhs, resultTy);
+        return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
+      default:
+        return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
+      }
+    }
+    case nonloc::SymbolValKind: {
+      // We only handle LHS as simple symbols or SymIntExprs.
+      SymbolRef Sym = lhs.castAs<nonloc::SymbolVal>().getSymbol();
+
+      // LHS is a symbolic expression.
+      if (const SymIntExpr *symIntExpr = dyn_cast<SymIntExpr>(Sym)) {
+
+        // Is this a logical not? (!x is represented as x == 0.)
+        if (op == BO_EQ && rhs.isZeroConstant()) {
+          // We know how to negate certain expressions. Simplify them here.
+
+          BinaryOperator::Opcode opc = symIntExpr->getOpcode();
+          switch (opc) {
+          default:
+            // We don't know how to negate this operation.
+            // Just handle it as if it were a normal comparison to 0.
+            break;
+          case BO_LAnd:
+          case BO_LOr:
+            llvm_unreachable("Logical operators handled by branching logic.");
+          case BO_Assign:
+          case BO_MulAssign:
+          case BO_DivAssign:
+          case BO_RemAssign:
+          case BO_AddAssign:
+          case BO_SubAssign:
+          case BO_ShlAssign:
+          case BO_ShrAssign:
+          case BO_AndAssign:
+          case BO_XorAssign:
+          case BO_OrAssign:
+          case BO_Comma:
+            llvm_unreachable("'=' and ',' operators handled by ExprEngine.");
+          case BO_PtrMemD:
+          case BO_PtrMemI:
+            llvm_unreachable("Pointer arithmetic not handled here.");
+          case BO_LT:
+          case BO_GT:
+          case BO_LE:
+          case BO_GE:
+          case BO_EQ:
+          case BO_NE:
+            assert(resultTy->isBooleanType() ||
+                   resultTy == getConditionType());
+            assert(symIntExpr->getType()->isBooleanType() ||
+                   getContext().hasSameUnqualifiedType(symIntExpr->getType(),
+                                                       getConditionType()));
+            // Negate the comparison and make a value.
+            opc = BinaryOperator::negateComparisonOp(opc);
+            return makeNonLoc(symIntExpr->getLHS(), opc,
+                symIntExpr->getRHS(), resultTy);
+          }
+        }
+
+        // For now, only handle expressions whose RHS is a constant.
+        if (const llvm::APSInt *RHSValue = getKnownValue(state, rhs)) {
+          // If both the LHS and the current expression are additive,
+          // fold their constants and try again.
+          if (BinaryOperator::isAdditiveOp(op)) {
+            BinaryOperator::Opcode lop = symIntExpr->getOpcode();
+            if (BinaryOperator::isAdditiveOp(lop)) {
+              // Convert the two constants to a common type, then combine them.
+
+              // resultTy may not be the best type to convert to, but it's
+              // probably the best choice in expressions with mixed type
+              // (such as x+1U+2LL). The rules for implicit conversions should
+              // choose a reasonable type to preserve the expression, and will
+              // at least match how the value is going to be used.
+              APSIntType IntType = BasicVals.getAPSIntType(resultTy);
+              const llvm::APSInt &first = IntType.convert(symIntExpr->getRHS());
+              const llvm::APSInt &second = IntType.convert(*RHSValue);
+
+              const llvm::APSInt *newRHS;
+              if (lop == op)
+                newRHS = BasicVals.evalAPSInt(BO_Add, first, second);
+              else
+                newRHS = BasicVals.evalAPSInt(BO_Sub, first, second);
+
+              assert(newRHS && "Invalid operation despite common type!");
+              rhs = nonloc::ConcreteInt(*newRHS);
+              lhs = nonloc::SymbolVal(symIntExpr->getLHS());
+              op = lop;
+              continue;
+            }
+          }
+
+          // Otherwise, make a SymIntExpr out of the expression.
+          return MakeSymIntVal(symIntExpr, op, *RHSValue, resultTy);
+        }
+      }
+
+      // Does the symbolic expression simplify to a constant?
+      // If so, "fold" the constant by setting 'lhs' to a ConcreteInt
+      // and try again.
+      ConstraintManager &CMgr = state->getConstraintManager();
+      if (const llvm::APSInt *Constant = CMgr.getSymVal(state, Sym)) {
+        lhs = nonloc::ConcreteInt(*Constant);
+        continue;
+      }
+
+      // Is the RHS a constant?
+      if (const llvm::APSInt *RHSValue = getKnownValue(state, rhs))
+        return MakeSymIntVal(Sym, op, *RHSValue, resultTy);
+
+      // Give up -- this is not a symbolic expression we can handle.
+      return makeSymExprValNN(state, op, InputLHS, InputRHS, resultTy);
+    }
+    }
+  }
+}
+
+// FIXME: all this logic will change if/when we have MemRegion::getLocation().
+SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
+                                  BinaryOperator::Opcode op,
+                                  Loc lhs, Loc rhs,
+                                  QualType resultTy) {
+  // Only comparisons and subtractions are valid operations on two pointers.
+  // See [C99 6.5.5 through 6.5.14] or [C++0x 5.6 through 5.15].
+  // However, if a pointer is cast to an integer, evalBinOpNN may end up
+  // calling this function with another operation (PR7527). We don't attempt to
+  // model this for now, but it could be useful, particularly when the
+  // "location" is actually an integer value that's been passed through a void*.
+  if (!(BinaryOperator::isComparisonOp(op) || op == BO_Sub))
+    return UnknownVal();
+
+  // Special cases for when both sides are identical.
+  if (lhs == rhs) {
+    switch (op) {
+    default:
+      llvm_unreachable("Unimplemented operation for two identical values");
+    case BO_Sub:
+      return makeZeroVal(resultTy);
+    case BO_EQ:
+    case BO_LE:
+    case BO_GE:
+      return makeTruthVal(true, resultTy);
+    case BO_NE:
+    case BO_LT:
+    case BO_GT:
+      return makeTruthVal(false, resultTy);
+    }
+  }
+
+  switch (lhs.getSubKind()) {
+  default:
+    llvm_unreachable("Ordering not implemented for this Loc.");
+
+  case loc::GotoLabelKind:
+    // The only thing we know about labels is that they're non-null.
+    if (rhs.isZeroConstant()) {
+      switch (op) {
+      default:
+        break;
+      case BO_Sub:
+        return evalCastFromLoc(lhs, resultTy);
+      case BO_EQ:
+      case BO_LE:
+      case BO_LT:
+        return makeTruthVal(false, resultTy);
+      case BO_NE:
+      case BO_GT:
+      case BO_GE:
+        return makeTruthVal(true, resultTy);
+      }
+    }
+    // There may be two labels for the same location, and a function region may
+    // have the same address as a label at the start of the function (depending
+    // on the ABI).
+    // FIXME: we can probably do a comparison against other MemRegions, though.
+    // FIXME: is there a way to tell if two labels refer to the same location?
+    return UnknownVal(); 
+
+  case loc::ConcreteIntKind: {
+    // If one of the operands is a symbol and the other is a constant,
+    // build an expression for use by the constraint manager.
+    if (SymbolRef rSym = rhs.getAsLocSymbol()) {
+      // We can only build expressions with symbols on the left,
+      // so we need a reversible operator.
+      if (!BinaryOperator::isComparisonOp(op))
+        return UnknownVal();
+
+      const llvm::APSInt &lVal = lhs.castAs<loc::ConcreteInt>().getValue();
+      op = BinaryOperator::reverseComparisonOp(op);
+      return makeNonLoc(rSym, op, lVal, resultTy);
+    }
+
+    // If both operands are constants, just perform the operation.
+    if (Optional<loc::ConcreteInt> rInt = rhs.getAs<loc::ConcreteInt>()) {
+      SVal ResultVal =
+          lhs.castAs<loc::ConcreteInt>().evalBinOp(BasicVals, op, *rInt);
+      if (Optional<NonLoc> Result = ResultVal.getAs<NonLoc>())
+        return evalCastFromNonLoc(*Result, resultTy);
+
+      assert(!ResultVal.getAs<Loc>() && "Loc-Loc ops should not produce Locs");
+      return UnknownVal();
+    }
+
+    // Special case comparisons against NULL.
+    // This must come after the test if the RHS is a symbol, which is used to
+    // build constraints. The address of any non-symbolic region is guaranteed
+    // to be non-NULL, as is any label.
+    assert(rhs.getAs<loc::MemRegionVal>() || rhs.getAs<loc::GotoLabel>());
+    if (lhs.isZeroConstant()) {
+      switch (op) {
+      default:
+        break;
+      case BO_EQ:
+      case BO_GT:
+      case BO_GE:
+        return makeTruthVal(false, resultTy);
+      case BO_NE:
+      case BO_LT:
+      case BO_LE:
+        return makeTruthVal(true, resultTy);
+      }
+    }
+
+    // Comparing an arbitrary integer to a region or label address is
+    // completely unknowable.
+    return UnknownVal();
+  }
+  case loc::MemRegionKind: {
+    if (Optional<loc::ConcreteInt> rInt = rhs.getAs<loc::ConcreteInt>()) {
+      // If one of the operands is a symbol and the other is a constant,
+      // build an expression for use by the constraint manager.
+      if (SymbolRef lSym = lhs.getAsLocSymbol())
+        return MakeSymIntVal(lSym, op, rInt->getValue(), resultTy);
+
+      // Special case comparisons to NULL.
+      // This must come after the test if the LHS is a symbol, which is used to
+      // build constraints. The address of any non-symbolic region is guaranteed
+      // to be non-NULL.
+      if (rInt->isZeroConstant()) {
+        switch (op) {
+        default:
+          break;
+        case BO_Sub:
+          return evalCastFromLoc(lhs, resultTy);
+        case BO_EQ:
+        case BO_LT:
+        case BO_LE:
+          return makeTruthVal(false, resultTy);
+        case BO_NE:
+        case BO_GT:
+        case BO_GE:
+          return makeTruthVal(true, resultTy);
+        }
+      }
+
+      // Comparing a region to an arbitrary integer is completely unknowable.
+      return UnknownVal();
+    }
+
+    // Get both values as regions, if possible.
+    const MemRegion *LeftMR = lhs.getAsRegion();
+    assert(LeftMR && "MemRegionKind SVal doesn't have a region!");
+
+    const MemRegion *RightMR = rhs.getAsRegion();
+    if (!RightMR)
+      // The RHS is probably a label, which in theory could address a region.
+      // FIXME: we can probably make a more useful statement about non-code
+      // regions, though.
+      return UnknownVal();
+
+    const MemRegion *LeftBase = LeftMR->getBaseRegion();
+    const MemRegion *RightBase = RightMR->getBaseRegion();
+    const MemSpaceRegion *LeftMS = LeftBase->getMemorySpace();
+    const MemSpaceRegion *RightMS = RightBase->getMemorySpace();
+    const MemSpaceRegion *UnknownMS = MemMgr.getUnknownRegion();
+
+    // If the two regions are from different known memory spaces they cannot be
+    // equal. Also, assume that no symbolic region (whose memory space is
+    // unknown) is on the stack.
+    if (LeftMS != RightMS &&
+        ((LeftMS != UnknownMS && RightMS != UnknownMS) ||
+         (isa<StackSpaceRegion>(LeftMS) || isa<StackSpaceRegion>(RightMS)))) {
+      switch (op) {
+      default:
+        return UnknownVal();
+      case BO_EQ:
+        return makeTruthVal(false, resultTy);
+      case BO_NE:
+        return makeTruthVal(true, resultTy);
+      }
+    }
+
+    // If both values wrap regions, see if they're from different base regions.
+    // Note, heap base symbolic regions are assumed to not alias with
+    // each other; for example, we assume that malloc returns different address
+    // on each invocation.
+    if (LeftBase != RightBase &&
+        ((!isa<SymbolicRegion>(LeftBase) && !isa<SymbolicRegion>(RightBase)) ||
+         (isa<HeapSpaceRegion>(LeftMS) || isa<HeapSpaceRegion>(RightMS))) ){
+      switch (op) {
+      default:
+        return UnknownVal();
+      case BO_EQ:
+        return makeTruthVal(false, resultTy);
+      case BO_NE:
+        return makeTruthVal(true, resultTy);
+      }
+    }
+
+    // FIXME: If/when there is a getAsRawOffset() for FieldRegions, this
+    // ElementRegion path and the FieldRegion path below should be unified.
+    if (const ElementRegion *LeftER = dyn_cast<ElementRegion>(LeftMR)) {
+      // First see if the right region is also an ElementRegion.
+      const ElementRegion *RightER = dyn_cast<ElementRegion>(RightMR);
+      if (!RightER)
+        return UnknownVal();
+
+      // Next, see if the two ERs have the same super-region and matching types.
+      // FIXME: This should do something useful even if the types don't match,
+      // though if both indexes are constant the RegionRawOffset path will
+      // give the correct answer.
+      if (LeftER->getSuperRegion() == RightER->getSuperRegion() &&
+          LeftER->getElementType() == RightER->getElementType()) {
+        // Get the left index and cast it to the correct type.
+        // If the index is unknown or undefined, bail out here.
+        SVal LeftIndexVal = LeftER->getIndex();
+        Optional<NonLoc> LeftIndex = LeftIndexVal.getAs<NonLoc>();
+        if (!LeftIndex)
+          return UnknownVal();
+        LeftIndexVal = evalCastFromNonLoc(*LeftIndex, ArrayIndexTy);
+        LeftIndex = LeftIndexVal.getAs<NonLoc>();
+        if (!LeftIndex)
+          return UnknownVal();
+
+        // Do the same for the right index.
+        SVal RightIndexVal = RightER->getIndex();
+        Optional<NonLoc> RightIndex = RightIndexVal.getAs<NonLoc>();
+        if (!RightIndex)
+          return UnknownVal();
+        RightIndexVal = evalCastFromNonLoc(*RightIndex, ArrayIndexTy);
+        RightIndex = RightIndexVal.getAs<NonLoc>();
+        if (!RightIndex)
+          return UnknownVal();
+
+        // Actually perform the operation.
+        // evalBinOpNN expects the two indexes to already be the right type.
+        return evalBinOpNN(state, op, *LeftIndex, *RightIndex, resultTy);
+      }
+
+      // If the element indexes aren't comparable, see if the raw offsets are.
+      RegionRawOffset LeftOffset = LeftER->getAsArrayOffset();
+      RegionRawOffset RightOffset = RightER->getAsArrayOffset();
+
+      if (LeftOffset.getRegion() != NULL &&
+          LeftOffset.getRegion() == RightOffset.getRegion()) {
+        CharUnits left = LeftOffset.getOffset();
+        CharUnits right = RightOffset.getOffset();
+
+        switch (op) {
+        default:
+          return UnknownVal();
+        case BO_LT:
+          return makeTruthVal(left < right, resultTy);
+        case BO_GT:
+          return makeTruthVal(left > right, resultTy);
+        case BO_LE:
+          return makeTruthVal(left <= right, resultTy);
+        case BO_GE:
+          return makeTruthVal(left >= right, resultTy);
+        case BO_EQ:
+          return makeTruthVal(left == right, resultTy);
+        case BO_NE:
+          return makeTruthVal(left != right, resultTy);
+        }
+      }
+
+      // If we get here, we have no way of comparing the ElementRegions.
+    }
+
+    // See if both regions are fields of the same structure.
+    // FIXME: This doesn't handle nesting, inheritance, or Objective-C ivars.
+    if (const FieldRegion *LeftFR = dyn_cast<FieldRegion>(LeftMR)) {
+      // Only comparisons are meaningful here!
+      if (!BinaryOperator::isComparisonOp(op))
+        return UnknownVal();
+
+      // First see if the right region is also a FieldRegion.
+      const FieldRegion *RightFR = dyn_cast<FieldRegion>(RightMR);
+      if (!RightFR)
+        return UnknownVal();
+
+      // Next, see if the two FRs have the same super-region.
+      // FIXME: This doesn't handle casts yet, and simply stripping the casts
+      // doesn't help.
+      if (LeftFR->getSuperRegion() != RightFR->getSuperRegion())
+        return UnknownVal();
+
+      const FieldDecl *LeftFD = LeftFR->getDecl();
+      const FieldDecl *RightFD = RightFR->getDecl();
+      const RecordDecl *RD = LeftFD->getParent();
+
+      // Make sure the two FRs are from the same kind of record. Just in case!
+      // FIXME: This is probably where inheritance would be a problem.
+      if (RD != RightFD->getParent())
+        return UnknownVal();
+
+      // We know for sure that the two fields are not the same, since that
+      // would have given us the same SVal.
+      if (op == BO_EQ)
+        return makeTruthVal(false, resultTy);
+      if (op == BO_NE)
+        return makeTruthVal(true, resultTy);
+
+      // Iterate through the fields and see which one comes first.
+      // [C99 6.7.2.1.13] "Within a structure object, the non-bit-field
+      // members and the units in which bit-fields reside have addresses that
+      // increase in the order in which they are declared."
+      bool leftFirst = (op == BO_LT || op == BO_LE);
+      for (RecordDecl::field_iterator I = RD->field_begin(),
+           E = RD->field_end(); I!=E; ++I) {
+        if (*I == LeftFD)
+          return makeTruthVal(leftFirst, resultTy);
+        if (*I == RightFD)
+          return makeTruthVal(!leftFirst, resultTy);
+      }
+
+      llvm_unreachable("Fields not found in parent record's definition");
+    }
+
+    // At this point we're not going to get a good answer, but we can try
+    // conjuring an expression instead.
+    SymbolRef LHSSym = lhs.getAsLocSymbol();
+    SymbolRef RHSSym = rhs.getAsLocSymbol();
+    if (LHSSym && RHSSym)
+      return makeNonLoc(LHSSym, op, RHSSym, resultTy);
+
+    // If we get here, we have no way of comparing the regions.
+    return UnknownVal();
+  }
+  }
+}
+
+SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
+                                  BinaryOperator::Opcode op,
+                                  Loc lhs, NonLoc rhs, QualType resultTy) {
+  
+  // Special case: rhs is a zero constant.
+  if (rhs.isZeroConstant())
+    return lhs;
+  
+  // Special case: 'rhs' is an integer that has the same width as a pointer and
+  // we are using the integer location in a comparison.  Normally this cannot be
+  // triggered, but transfer functions like those for OSCompareAndSwapBarrier32
+  // can generate comparisons that trigger this code.
+  // FIXME: Are all locations guaranteed to have pointer width?
+  if (BinaryOperator::isComparisonOp(op)) {
+    if (Optional<nonloc::ConcreteInt> rhsInt =
+            rhs.getAs<nonloc::ConcreteInt>()) {
+      const llvm::APSInt *x = &rhsInt->getValue();
+      ASTContext &ctx = Context;
+      if (ctx.getTypeSize(ctx.VoidPtrTy) == x->getBitWidth()) {
+        // Convert the signedness of the integer (if necessary).
+        if (x->isSigned())
+          x = &getBasicValueFactory().getValue(*x, true);
+
+        return evalBinOpLL(state, op, lhs, loc::ConcreteInt(*x), resultTy);
+      }
+    }
+    return UnknownVal();
+  }
+  
+  // We are dealing with pointer arithmetic.
+
+  // Handle pointer arithmetic on constant values.
+  if (Optional<nonloc::ConcreteInt> rhsInt = rhs.getAs<nonloc::ConcreteInt>()) {
+    if (Optional<loc::ConcreteInt> lhsInt = lhs.getAs<loc::ConcreteInt>()) {
+      const llvm::APSInt &leftI = lhsInt->getValue();
+      assert(leftI.isUnsigned());
+      llvm::APSInt rightI(rhsInt->getValue(), /* isUnsigned */ true);
+
+      // Convert the bitwidth of rightI.  This should deal with overflow
+      // since we are dealing with concrete values.
+      rightI = rightI.extOrTrunc(leftI.getBitWidth());
+
+      // Offset the increment by the pointer size.
+      llvm::APSInt Multiplicand(rightI.getBitWidth(), /* isUnsigned */ true);
+      rightI *= Multiplicand;
+      
+      // Compute the adjusted pointer.
+      switch (op) {
+        case BO_Add:
+          rightI = leftI + rightI;
+          break;
+        case BO_Sub:
+          rightI = leftI - rightI;
+          break;
+        default:
+          llvm_unreachable("Invalid pointer arithmetic operation");
+      }
+      return loc::ConcreteInt(getBasicValueFactory().getValue(rightI));
+    }
+  }
+
+  // Handle cases where 'lhs' is a region.
+  if (const MemRegion *region = lhs.getAsRegion()) {
+    rhs = convertToArrayIndex(rhs).castAs<NonLoc>();
+    SVal index = UnknownVal();
+    const MemRegion *superR = 0;
+    QualType elementType;
+
+    if (const ElementRegion *elemReg = dyn_cast<ElementRegion>(region)) {
+      assert(op == BO_Add || op == BO_Sub);
+      index = evalBinOpNN(state, op, elemReg->getIndex(), rhs,
+                          getArrayIndexType());
+      superR = elemReg->getSuperRegion();
+      elementType = elemReg->getElementType();
+    }
+    else if (isa<SubRegion>(region)) {
+      superR = region;
+      index = rhs;
+      if (resultTy->isAnyPointerType())
+        elementType = resultTy->getPointeeType();
+    }
+
+    if (Optional<NonLoc> indexV = index.getAs<NonLoc>()) {
+      return loc::MemRegionVal(MemMgr.getElementRegion(elementType, *indexV,
+                                                       superR, getContext()));
+    }
+  }
+  return UnknownVal();  
+}
+
+const llvm::APSInt *SimpleSValBuilder::getKnownValue(ProgramStateRef state,
+                                                   SVal V) {
+  if (V.isUnknownOrUndef())
+    return NULL;
+
+  if (Optional<loc::ConcreteInt> X = V.getAs<loc::ConcreteInt>())
+    return &X->getValue();
+
+  if (Optional<nonloc::ConcreteInt> X = V.getAs<nonloc::ConcreteInt>())
+    return &X->getValue();
+
+  if (SymbolRef Sym = V.getAsSymbol())
+    return state->getConstraintManager().getSymVal(state, Sym);
+
+  // FIXME: Add support for SymExprs.
+  return NULL;
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/Store.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
new file mode 100644
index 0000000..690ed08
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -0,0 +1,518 @@
+//== Store.cpp - Interface for maps from Locations to Values ----*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the types Store and StoreManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+using namespace clang;
+using namespace ento;
+
+StoreManager::StoreManager(ProgramStateManager &stateMgr)
+  : svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
+    MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}
+
+StoreRef StoreManager::enterStackFrame(Store OldStore,
+                                       const CallEvent &Call,
+                                       const StackFrameContext *LCtx) {
+  StoreRef Store = StoreRef(OldStore, *this);
+
+  SmallVector<CallEvent::FrameBindingTy, 16> InitialBindings;
+  Call.getInitialStackFrameContents(LCtx, InitialBindings);
+
+  for (CallEvent::BindingsTy::iterator I = InitialBindings.begin(),
+                                       E = InitialBindings.end();
+       I != E; ++I) {
+    Store = Bind(Store.getStore(), I->first, I->second);
+  }
+
+  return Store;
+}
+
+const MemRegion *StoreManager::MakeElementRegion(const MemRegion *Base,
+                                              QualType EleTy, uint64_t index) {
+  NonLoc idx = svalBuilder.makeArrayIndex(index);
+  return MRMgr.getElementRegion(EleTy, idx, Base, svalBuilder.getContext());
+}
+
+// FIXME: Merge with the implementation of the same method in MemRegion.cpp
+static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
+  if (const RecordType *RT = Ty->getAs<RecordType>()) {
+    const RecordDecl *D = RT->getDecl();
+    if (!D->getDefinition())
+      return false;
+  }
+
+  return true;
+}
+
+StoreRef StoreManager::BindDefault(Store store, const MemRegion *R, SVal V) {
+  return StoreRef(store, *this);
+}
+
+const ElementRegion *StoreManager::GetElementZeroRegion(const MemRegion *R, 
+                                                        QualType T) {
+  NonLoc idx = svalBuilder.makeZeroArrayIndex();
+  assert(!T.isNull());
+  return MRMgr.getElementRegion(T, idx, R, Ctx);
+}
+
+const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy) {
+
+  ASTContext &Ctx = StateMgr.getContext();
+
+  // Handle casts to Objective-C objects.
+  if (CastToTy->isObjCObjectPointerType())
+    return R->StripCasts();
+
+  if (CastToTy->isBlockPointerType()) {
+    // FIXME: We may need different solutions, depending on the symbol
+    // involved.  Blocks can be cast to/from 'id', as they can be treated
+    // as Objective-C objects.  This could possibly be handled by enhancing
+    // our reasoning of downcasts of symbolic objects.
+    if (isa<CodeTextRegion>(R) || isa<SymbolicRegion>(R))
+      return R;
+
+    // We don't know what to make of it.  Return a NULL region, which
+    // will be interpreted as UnknownVal.
+    return NULL;
+  }
+
+  // Now assume we are casting from pointer to pointer. Other cases should
+  // already be handled.
+  QualType PointeeTy = CastToTy->getPointeeType();
+  QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
+
+  // Handle casts to void*.  We just pass the region through.
+  if (CanonPointeeTy.getLocalUnqualifiedType() == Ctx.VoidTy)
+    return R;
+
+  // Handle casts from compatible types.
+  if (R->isBoundable())
+    if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
+      QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
+      if (CanonPointeeTy == ObjTy)
+        return R;
+    }
+
+  // Process region cast according to the kind of the region being cast.
+  switch (R->getKind()) {
+    case MemRegion::CXXThisRegionKind:
+    case MemRegion::GenericMemSpaceRegionKind:
+    case MemRegion::StackLocalsSpaceRegionKind:
+    case MemRegion::StackArgumentsSpaceRegionKind:
+    case MemRegion::HeapSpaceRegionKind:
+    case MemRegion::UnknownSpaceRegionKind:
+    case MemRegion::StaticGlobalSpaceRegionKind:
+    case MemRegion::GlobalInternalSpaceRegionKind:
+    case MemRegion::GlobalSystemSpaceRegionKind:
+    case MemRegion::GlobalImmutableSpaceRegionKind: {
+      llvm_unreachable("Invalid region cast");
+    }
+
+    case MemRegion::FunctionTextRegionKind:
+    case MemRegion::BlockTextRegionKind:
+    case MemRegion::BlockDataRegionKind:
+    case MemRegion::StringRegionKind:
+      // FIXME: Need to handle arbitrary downcasts.
+    case MemRegion::SymbolicRegionKind:
+    case MemRegion::AllocaRegionKind:
+    case MemRegion::CompoundLiteralRegionKind:
+    case MemRegion::FieldRegionKind:
+    case MemRegion::ObjCIvarRegionKind:
+    case MemRegion::ObjCStringRegionKind:
+    case MemRegion::VarRegionKind:
+    case MemRegion::CXXTempObjectRegionKind:
+    case MemRegion::CXXBaseObjectRegionKind:
+      return MakeElementRegion(R, PointeeTy);
+
+    case MemRegion::ElementRegionKind: {
+      // If we are casting from an ElementRegion to another type, the
+      // algorithm is as follows:
+      //
+      // (1) Compute the "raw offset" of the ElementRegion from the
+      //     base region.  This is done by calling 'getAsRawOffset()'.
+      //
+      // (2a) If we get a 'RegionRawOffset' after calling
+      //      'getAsRawOffset()', determine if the absolute offset
+      //      can be exactly divided into chunks of the size of the
+      //      casted-pointee type.  If so, create a new ElementRegion with
+      //      the pointee-cast type as the new ElementType and the index
+      //      being the offset divided by the chunk size.  If not, create
+      //      a new ElementRegion at offset 0 off the raw offset region.
+      //
+      // (2b) If we don't get a 'RegionRawOffset' after calling
+      //      'getAsRawOffset()', it means that we are at offset 0.
+      //
+      // FIXME: Handle symbolic raw offsets.
+
+      const ElementRegion *elementR = cast<ElementRegion>(R);
+      const RegionRawOffset &rawOff = elementR->getAsArrayOffset();
+      const MemRegion *baseR = rawOff.getRegion();
+
+      // If we cannot compute a raw offset, throw up our hands and return
+      // a NULL MemRegion*.
+      if (!baseR)
+        return NULL;
+
+      CharUnits off = rawOff.getOffset();
+
+      if (off.isZero()) {
+        // Edge case: we are at 0 bytes off the beginning of baseR.  We
+        // check to see if the type we are casting to is the same as the base
+        // region.  If so, just return the base region.
+        if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(baseR)) {
+          QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
+          QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
+          if (CanonPointeeTy == ObjTy)
+            return baseR;
+        }
+
+        // Otherwise, create a new ElementRegion at offset 0.
+        return MakeElementRegion(baseR, PointeeTy);
+      }
+
+      // We have a non-zero offset from the base region.  We want to determine
+      // if the offset can be evenly divided by sizeof(PointeeTy).  If so,
+      // we create an ElementRegion whose index is that value.  Otherwise, we
+      // create two ElementRegions, one that reflects a raw offset and the other
+      // that reflects the cast.
+
+      // Compute the index for the new ElementRegion.
+      int64_t newIndex = 0;
+      const MemRegion *newSuperR = 0;
+
+      // We can only compute sizeof(PointeeTy) if it is a complete type.
+      if (IsCompleteType(Ctx, PointeeTy)) {
+        // Compute the size in **bytes**.
+        CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);
+        if (!pointeeTySize.isZero()) {
+          // Is the offset a multiple of the size?  If so, we can layer the
+          // ElementRegion (with elementType == PointeeTy) directly on top of
+          // the base region.
+          if (off % pointeeTySize == 0) {
+            newIndex = off / pointeeTySize;
+            newSuperR = baseR;
+          }
+        }
+      }
+
+      if (!newSuperR) {
+        // Create an intermediate ElementRegion to represent the raw byte.
+        // This will be the super region of the final ElementRegion.
+        newSuperR = MakeElementRegion(baseR, Ctx.CharTy, off.getQuantity());
+      }
+
+      return MakeElementRegion(newSuperR, PointeeTy, newIndex);
+    }
+  }
+
+  llvm_unreachable("unreachable");
+}
+
+static bool regionMatchesCXXRecordType(SVal V, QualType Ty) {
+  const MemRegion *MR = V.getAsRegion();
+  if (!MR)
+    return true;
+
+  const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(MR);
+  if (!TVR)
+    return true;
+
+  const CXXRecordDecl *RD = TVR->getValueType()->getAsCXXRecordDecl();
+  if (!RD)
+    return true;
+
+  const CXXRecordDecl *Expected = Ty->getPointeeCXXRecordDecl();
+  if (!Expected)
+    Expected = Ty->getAsCXXRecordDecl();
+
+  return Expected->getCanonicalDecl() == RD->getCanonicalDecl();
+}
+
+SVal StoreManager::evalDerivedToBase(SVal Derived, const CastExpr *Cast) {
+  // Sanity check to avoid doing the wrong thing in the face of
+  // reinterpret_cast.
+  if (!regionMatchesCXXRecordType(Derived, Cast->getSubExpr()->getType()))
+    return UnknownVal();
+
+  // Walk through the cast path to create nested CXXBaseRegions.
+  SVal Result = Derived;
+  for (CastExpr::path_const_iterator I = Cast->path_begin(),
+                                     E = Cast->path_end();
+       I != E; ++I) {
+    Result = evalDerivedToBase(Result, (*I)->getType(), (*I)->isVirtual());
+  }
+  return Result;
+}
+
+SVal StoreManager::evalDerivedToBase(SVal Derived, const CXXBasePath &Path) {
+  // Walk through the path to create nested CXXBaseRegions.
+  SVal Result = Derived;
+  for (CXXBasePath::const_iterator I = Path.begin(), E = Path.end();
+       I != E; ++I) {
+    Result = evalDerivedToBase(Result, I->Base->getType(),
+                               I->Base->isVirtual());
+  }
+  return Result;
+}
+
+SVal StoreManager::evalDerivedToBase(SVal Derived, QualType BaseType,
+                                     bool IsVirtual) {
+  Optional<loc::MemRegionVal> DerivedRegVal =
+      Derived.getAs<loc::MemRegionVal>();
+  if (!DerivedRegVal)
+    return Derived;
+
+  const CXXRecordDecl *BaseDecl = BaseType->getPointeeCXXRecordDecl();
+  if (!BaseDecl)
+    BaseDecl = BaseType->getAsCXXRecordDecl();
+  assert(BaseDecl && "not a C++ object?");
+
+  const MemRegion *BaseReg =
+    MRMgr.getCXXBaseObjectRegion(BaseDecl, DerivedRegVal->getRegion(),
+                                 IsVirtual);
+
+  return loc::MemRegionVal(BaseReg);
+}
+
+/// Returns the static type of the given region, if it represents a C++ class
+/// object.
+///
+/// This handles both fully-typed regions, where the dynamic type is known, and
+/// symbolic regions, where the dynamic type is merely bounded (and even then,
+/// only ostensibly!), but does not take advantage of any dynamic type info.
+static const CXXRecordDecl *getCXXRecordType(const MemRegion *MR) {
+  if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(MR))
+    return TVR->getValueType()->getAsCXXRecordDecl();
+  if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
+    return SR->getSymbol()->getType()->getPointeeCXXRecordDecl();
+  return 0;
+}
+
+SVal StoreManager::evalDynamicCast(SVal Base, QualType TargetType,
+                                   bool &Failed) {
+  Failed = false;
+
+  const MemRegion *MR = Base.getAsRegion();
+  if (!MR)
+    return UnknownVal();
+
+  // Assume the derived class is a pointer or a reference to a CXX record.
+  TargetType = TargetType->getPointeeType();
+  assert(!TargetType.isNull());
+  const CXXRecordDecl *TargetClass = TargetType->getAsCXXRecordDecl();
+  if (!TargetClass && !TargetType->isVoidType())
+    return UnknownVal();
+
+  // Drill down the CXXBaseObject chains, which represent upcasts (casts from
+  // derived to base).
+  while (const CXXRecordDecl *MRClass = getCXXRecordType(MR)) {
+    // If found the derived class, the cast succeeds.
+    if (MRClass == TargetClass)
+      return loc::MemRegionVal(MR);
+
+    if (!TargetType->isVoidType()) {
+      // Static upcasts are marked as DerivedToBase casts by Sema, so this will
+      // only happen when multiple or virtual inheritance is involved.
+      CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true,
+                         /*DetectVirtual=*/false);
+      if (MRClass->isDerivedFrom(TargetClass, Paths))
+        return evalDerivedToBase(loc::MemRegionVal(MR), Paths.front());
+    }
+
+    if (const CXXBaseObjectRegion *BaseR = dyn_cast<CXXBaseObjectRegion>(MR)) {
+      // Drill down the chain to get the derived classes.
+      MR = BaseR->getSuperRegion();
+      continue;
+    }
+
+    // If this is a cast to void*, return the region.
+    if (TargetType->isVoidType())
+      return loc::MemRegionVal(MR);
+
+    // Strange use of reinterpret_cast can give us paths we don't reason
+    // about well, by putting in ElementRegions where we'd expect
+    // CXXBaseObjectRegions. If it's a valid reinterpret_cast (i.e. if the
+    // derived class has a zero offset from the base class), then it's safe
+    // to strip the cast; if it's invalid, -Wreinterpret-base-class should
+    // catch it. In the interest of performance, the analyzer will silently
+    // do the wrong thing in the invalid case (because offsets for subregions
+    // will be wrong).
+    const MemRegion *Uncasted = MR->StripCasts(/*IncludeBaseCasts=*/false);
+    if (Uncasted == MR) {
+      // We reached the bottom of the hierarchy and did not find the derived
+      // class. We must be casting the base to derived, so the cast should
+      // fail.
+      break;
+    }
+
+    MR = Uncasted;
+  }
+
+  // We failed if the region we ended up with has perfect type info.
+  Failed = isa<TypedValueRegion>(MR);
+  return UnknownVal();
+}
+
+
+/// CastRetrievedVal - Used by subclasses of StoreManager to implement
+///  implicit casts that arise from loads from regions that are reinterpreted
+///  as another region.
+SVal StoreManager::CastRetrievedVal(SVal V, const TypedValueRegion *R,
+                                    QualType castTy, bool performTestOnly) {
+  
+  if (castTy.isNull() || V.isUnknownOrUndef())
+    return V;
+  
+  ASTContext &Ctx = svalBuilder.getContext();
+
+  if (performTestOnly) {  
+    // Automatically translate references to pointers.
+    QualType T = R->getValueType();
+    if (const ReferenceType *RT = T->getAs<ReferenceType>())
+      T = Ctx.getPointerType(RT->getPointeeType());
+    
+    assert(svalBuilder.getContext().hasSameUnqualifiedType(castTy, T));
+    return V;
+  }
+  
+  return svalBuilder.dispatchCast(V, castTy);
+}
+
+SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
+  if (Base.isUnknownOrUndef())
+    return Base;
+
+  Loc BaseL = Base.castAs<Loc>();
+  const MemRegion* BaseR = 0;
+
+  switch (BaseL.getSubKind()) {
+  case loc::MemRegionKind:
+    BaseR = BaseL.castAs<loc::MemRegionVal>().getRegion();
+    break;
+
+  case loc::GotoLabelKind:
+    // These are abnormal cases. Flag an undefined value.
+    return UndefinedVal();
+
+  case loc::ConcreteIntKind:
+    // While these seem funny, this can happen through casts.
+    // FIXME: What we should return is the field offset.  For example,
+    //  add the field offset to the integer value.  That way funny things
+    //  like this work properly:  &(((struct foo *) 0xa)->f)
+    return Base;
+
+  default:
+    llvm_unreachable("Unhandled Base.");
+  }
+
+  // NOTE: We must have this check first because ObjCIvarDecl is a subclass
+  // of FieldDecl.
+  if (const ObjCIvarDecl *ID = dyn_cast<ObjCIvarDecl>(D))
+    return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));
+
+  return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
+}
+
+SVal StoreManager::getLValueIvar(const ObjCIvarDecl *decl, SVal base) {
+  return getLValueFieldOrIvar(decl, base);
+}
+
+SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset, 
+                                    SVal Base) {
+
+  // If the base is an unknown or undefined value, just return it back.
+  // FIXME: For absolute pointer addresses, we just return that value back as
+  //  well, although in reality we should return the offset added to that
+  //  value.
+  if (Base.isUnknownOrUndef() || Base.getAs<loc::ConcreteInt>())
+    return Base;
+
+  const MemRegion* BaseRegion = Base.castAs<loc::MemRegionVal>().getRegion();
+
+  // A pointer of any type can be cast and used as an array base.
+  const ElementRegion *ElemR = dyn_cast<ElementRegion>(BaseRegion);
+
+  // Convert the offset to the appropriate size and signedness.
+  Offset = svalBuilder.convertToArrayIndex(Offset).castAs<NonLoc>();
+
+  if (!ElemR) {
+    //
+    // If the base region is not an ElementRegion, create one.
+    // This can happen in the following example:
+    //
+    //   char *p = __builtin_alloc(10);
+    //   p[1] = 8;
+    //
+    //  Observe that 'p' binds to an AllocaRegion.
+    //
+    return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
+                                                    BaseRegion, Ctx));
+  }
+
+  SVal BaseIdx = ElemR->getIndex();
+
+  if (!BaseIdx.getAs<nonloc::ConcreteInt>())
+    return UnknownVal();
+
+  const llvm::APSInt &BaseIdxI =
+      BaseIdx.castAs<nonloc::ConcreteInt>().getValue();
+
+  // Only allow non-integer offsets if the base region has no offset itself.
+  // FIXME: This is a somewhat arbitrary restriction. We should be using
+  // SValBuilder here to add the two offsets without checking their types.
+  if (!Offset.getAs<nonloc::ConcreteInt>()) {
+    if (isa<ElementRegion>(BaseRegion->StripCasts()))
+      return UnknownVal();
+
+    return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
+                                                    ElemR->getSuperRegion(),
+                                                    Ctx));
+  }
+
+  const llvm::APSInt& OffI = Offset.castAs<nonloc::ConcreteInt>().getValue();
+  assert(BaseIdxI.isSigned());
+
+  // Compute the new index.
+  nonloc::ConcreteInt NewIdx(svalBuilder.getBasicValueFactory().getValue(BaseIdxI +
+                                                                    OffI));
+
+  // Construct the new ElementRegion.
+  const MemRegion *ArrayR = ElemR->getSuperRegion();
+  return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR,
+                                                  Ctx));
+}
+
+StoreManager::BindingsHandler::~BindingsHandler() {}
+
+bool StoreManager::FindUniqueBinding::HandleBinding(StoreManager& SMgr,
+                                                    Store store,
+                                                    const MemRegion* R,
+                                                    SVal val) {
+  SymbolRef SymV = val.getAsLocSymbol();
+  if (!SymV || SymV != Sym)
+    return true;
+
+  if (Binding) {
+    First = false;
+    return false;
+  }
+  else
+    Binding = R;
+
+  return true;
+}
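
getLValueElement above folds consecutive element offsets when both indices are concrete: an lvalue that is already arr[i], indexed again by j, becomes arr[i + j] over the same super-region. A small sketch of just that arithmetic follows, with a hypothetical ElementLValue struct standing in for ElementRegion.

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Hypothetical stand-in for an ElementRegion with a concrete index.
    struct ElementLValue {
      std::string array;   // the super (array) region
      int64_t index;       // concrete element index
    };

    // Mirrors the concrete-index path: NewIdx = BaseIdxI + OffI, reusing
    // the element region's super-region as the array.
    ElementLValue indexInto(const ElementLValue &base, int64_t offset) {
      return ElementLValue{base.array, base.index + offset};
    }

    int main() {
      ElementLValue p{"buf", 3};          // models an lvalue for buf[3]
      ElementLValue q = indexInto(p, 2);  // models (&buf[3])[2], i.e. buf[5]
      std::cout << q.array << "[" << q.index << "]\n";
    }

The non-concrete offset paths fall back to UnknownVal or a fresh ElementRegion on the stripped base, as in the code above.
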
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp
new file mode 100644
index 0000000..350f4b8
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp
@@ -0,0 +1,14 @@
+//== SubEngine.cpp - Interface of the subengine of CoreEngine ------*- C++ -*-//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+
+using namespace clang::ento;
+
+void SubEngine::anchor() { }
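
SubEngine.cpp exists only to give SubEngine an out-of-line virtual member. This is the usual LLVM anchor idiom: defining one trivial virtual method in a single .cpp file pins the class's vtable (and RTTI) to that object file instead of letting every includer emit a weak copy. A generic sketch of the pattern, with a made-up Widget class:

    // Widget.h -- declare one trivial virtual method out of line.
    struct Widget {
      virtual ~Widget() = default;
      virtual void anchor();   // intentionally defined in exactly one .cpp
    };

    // Widget.cpp -- the out-of-line definition anchors the vtable here.
    void Widget::anchor() {}
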
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
new file mode 100644
index 0000000..7c75b6c
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -0,0 +1,552 @@
+//== SymbolManager.cpp - Management of Symbolic Values ----------*- C++ -*--==//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines SymbolManager, a class that manages symbolic values
+//  created for use by ExprEngine and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+void SymExpr::anchor() { }
+
+void SymExpr::dump() const {
+  dumpToStream(llvm::errs());
+}
+
+void SymIntExpr::dumpToStream(raw_ostream &os) const {
+  os << '(';
+  getLHS()->dumpToStream(os);
+  os << ") "
+     << BinaryOperator::getOpcodeStr(getOpcode()) << ' '
+     << getRHS().getZExtValue();
+  if (getRHS().isUnsigned())
+    os << 'U';
+}
+
+void IntSymExpr::dumpToStream(raw_ostream &os) const {
+  os << getLHS().getZExtValue();
+  if (getLHS().isUnsigned())
+    os << 'U';
+  os << ' '
+     << BinaryOperator::getOpcodeStr(getOpcode())
+     << " (";
+  getRHS()->dumpToStream(os);
+  os << ')';
+}
+
+void SymSymExpr::dumpToStream(raw_ostream &os) const {
+  os << '(';
+  getLHS()->dumpToStream(os);
+  os << ") "
+     << BinaryOperator::getOpcodeStr(getOpcode())
+     << " (";
+  getRHS()->dumpToStream(os);
+  os << ')';
+}
+
+void SymbolCast::dumpToStream(raw_ostream &os) const {
+  os << '(' << ToTy.getAsString() << ") (";
+  Operand->dumpToStream(os);
+  os << ')';
+}
+
+void SymbolConjured::dumpToStream(raw_ostream &os) const {
+  os << "conj_$" << getSymbolID() << '{' << T.getAsString() << '}';
+}
+
+void SymbolDerived::dumpToStream(raw_ostream &os) const {
+  os << "derived_$" << getSymbolID() << '{'
+     << getParentSymbol() << ',' << getRegion() << '}';
+}
+
+void SymbolExtent::dumpToStream(raw_ostream &os) const {
+  os << "extent_$" << getSymbolID() << '{' << getRegion() << '}';
+}
+
+void SymbolMetadata::dumpToStream(raw_ostream &os) const {
+  os << "meta_$" << getSymbolID() << '{'
+     << getRegion() << ',' << T.getAsString() << '}';
+}
+
+void SymbolData::anchor() { }
+
+void SymbolRegionValue::dumpToStream(raw_ostream &os) const {
+  os << "reg_$" << getSymbolID() << "<" << R << ">";
+}
+
+bool SymExpr::symbol_iterator::operator==(const symbol_iterator &X) const {
+  return itr == X.itr;
+}
+
+bool SymExpr::symbol_iterator::operator!=(const symbol_iterator &X) const {
+  return itr != X.itr;
+}
+
+SymExpr::symbol_iterator::symbol_iterator(const SymExpr *SE) {
+  itr.push_back(SE);
+}
+
+SymExpr::symbol_iterator &SymExpr::symbol_iterator::operator++() {
+  assert(!itr.empty() && "attempting to iterate on an 'end' iterator");
+  expand();
+  return *this;
+}
+
+SymbolRef SymExpr::symbol_iterator::operator*() {
+  assert(!itr.empty() && "attempting to dereference an 'end' iterator");
+  return itr.back();
+}
+
+void SymExpr::symbol_iterator::expand() {
+  const SymExpr *SE = itr.back();
+  itr.pop_back();
+
+  switch (SE->getKind()) {
+    case SymExpr::RegionValueKind:
+    case SymExpr::ConjuredKind:
+    case SymExpr::DerivedKind:
+    case SymExpr::ExtentKind:
+    case SymExpr::MetadataKind:
+      return;
+    case SymExpr::CastSymbolKind:
+      itr.push_back(cast<SymbolCast>(SE)->getOperand());
+      return;
+    case SymExpr::SymIntKind:
+      itr.push_back(cast<SymIntExpr>(SE)->getLHS());
+      return;
+    case SymExpr::IntSymKind:
+      itr.push_back(cast<IntSymExpr>(SE)->getRHS());
+      return;
+    case SymExpr::SymSymKind: {
+      const SymSymExpr *x = cast<SymSymExpr>(SE);
+      itr.push_back(x->getLHS());
+      itr.push_back(x->getRHS());
+      return;
+    }
+  }
+  llvm_unreachable("unhandled expansion case");
+}
+
+unsigned SymExpr::computeComplexity() const {
+  unsigned R = 0;
+  for (symbol_iterator I = symbol_begin(), E = symbol_end(); I != E; ++I)
+    R++;
+  return R;
+}
+
+const SymbolRegionValue*
+SymbolManager::getRegionValueSymbol(const TypedValueRegion* R) {
+  llvm::FoldingSetNodeID profile;
+  SymbolRegionValue::Profile(profile, R);
+  void *InsertPos;
+  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+  if (!SD) {
+    SD = (SymExpr*) BPAlloc.Allocate<SymbolRegionValue>();
+    new (SD) SymbolRegionValue(SymbolCounter, R);
+    DataSet.InsertNode(SD, InsertPos);
+    ++SymbolCounter;
+  }
+
+  return cast<SymbolRegionValue>(SD);
+}
+
+const SymbolConjured* SymbolManager::conjureSymbol(const Stmt *E,
+                                                   const LocationContext *LCtx,
+                                                   QualType T,
+                                                   unsigned Count,
+                                                   const void *SymbolTag) {
+  llvm::FoldingSetNodeID profile;
+  SymbolConjured::Profile(profile, E, T, Count, LCtx, SymbolTag);
+  void *InsertPos;
+  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+  if (!SD) {
+    SD = (SymExpr*) BPAlloc.Allocate<SymbolConjured>();
+    new (SD) SymbolConjured(SymbolCounter, E, LCtx, T, Count, SymbolTag);
+    DataSet.InsertNode(SD, InsertPos);
+    ++SymbolCounter;
+  }
+
+  return cast<SymbolConjured>(SD);
+}
+
+const SymbolDerived*
+SymbolManager::getDerivedSymbol(SymbolRef parentSymbol,
+                                const TypedValueRegion *R) {
+
+  llvm::FoldingSetNodeID profile;
+  SymbolDerived::Profile(profile, parentSymbol, R);
+  void *InsertPos;
+  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+  if (!SD) {
+    SD = (SymExpr*) BPAlloc.Allocate<SymbolDerived>();
+    new (SD) SymbolDerived(SymbolCounter, parentSymbol, R);
+    DataSet.InsertNode(SD, InsertPos);
+    ++SymbolCounter;
+  }
+
+  return cast<SymbolDerived>(SD);
+}
+
+const SymbolExtent*
+SymbolManager::getExtentSymbol(const SubRegion *R) {
+  llvm::FoldingSetNodeID profile;
+  SymbolExtent::Profile(profile, R);
+  void *InsertPos;
+  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+  if (!SD) {
+    SD = (SymExpr*) BPAlloc.Allocate<SymbolExtent>();
+    new (SD) SymbolExtent(SymbolCounter, R);
+    DataSet.InsertNode(SD, InsertPos);
+    ++SymbolCounter;
+  }
+
+  return cast<SymbolExtent>(SD);
+}
+
+const SymbolMetadata*
+SymbolManager::getMetadataSymbol(const MemRegion* R, const Stmt *S, QualType T,
+                                 unsigned Count, const void *SymbolTag) {
+
+  llvm::FoldingSetNodeID profile;
+  SymbolMetadata::Profile(profile, R, S, T, Count, SymbolTag);
+  void *InsertPos;
+  SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+  if (!SD) {
+    SD = (SymExpr*) BPAlloc.Allocate<SymbolMetadata>();
+    new (SD) SymbolMetadata(SymbolCounter, R, S, T, Count, SymbolTag);
+    DataSet.InsertNode(SD, InsertPos);
+    ++SymbolCounter;
+  }
+
+  return cast<SymbolMetadata>(SD);
+}
+
+const SymbolCast*
+SymbolManager::getCastSymbol(const SymExpr *Op,
+                             QualType From, QualType To) {
+  llvm::FoldingSetNodeID ID;
+  SymbolCast::Profile(ID, Op, From, To);
+  void *InsertPos;
+  SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+  if (!data) {
+    data = (SymbolCast*) BPAlloc.Allocate<SymbolCast>();
+    new (data) SymbolCast(Op, From, To);
+    DataSet.InsertNode(data, InsertPos);
+  }
+
+  return cast<SymbolCast>(data);
+}
+
+const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
+                                               BinaryOperator::Opcode op,
+                                               const llvm::APSInt& v,
+                                               QualType t) {
+  llvm::FoldingSetNodeID ID;
+  SymIntExpr::Profile(ID, lhs, op, v, t);
+  void *InsertPos;
+  SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+  if (!data) {
+    data = (SymIntExpr*) BPAlloc.Allocate<SymIntExpr>();
+    new (data) SymIntExpr(lhs, op, v, t);
+    DataSet.InsertNode(data, InsertPos);
+  }
+
+  return cast<SymIntExpr>(data);
+}
+
+const IntSymExpr *SymbolManager::getIntSymExpr(const llvm::APSInt& lhs,
+                                               BinaryOperator::Opcode op,
+                                               const SymExpr *rhs,
+                                               QualType t) {
+  llvm::FoldingSetNodeID ID;
+  IntSymExpr::Profile(ID, lhs, op, rhs, t);
+  void *InsertPos;
+  SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+  if (!data) {
+    data = (IntSymExpr*) BPAlloc.Allocate<IntSymExpr>();
+    new (data) IntSymExpr(lhs, op, rhs, t);
+    DataSet.InsertNode(data, InsertPos);
+  }
+
+  return cast<IntSymExpr>(data);
+}
+
+const SymSymExpr *SymbolManager::getSymSymExpr(const SymExpr *lhs,
+                                               BinaryOperator::Opcode op,
+                                               const SymExpr *rhs,
+                                               QualType t) {
+  llvm::FoldingSetNodeID ID;
+  SymSymExpr::Profile(ID, lhs, op, rhs, t);
+  void *InsertPos;
+  SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+  if (!data) {
+    data = (SymSymExpr*) BPAlloc.Allocate<SymSymExpr>();
+    new (data) SymSymExpr(lhs, op, rhs, t);
+    DataSet.InsertNode(data, InsertPos);
+  }
+
+  return cast<SymSymExpr>(data);
+}
+
+QualType SymbolConjured::getType() const {
+  return T;
+}
+
+QualType SymbolDerived::getType() const {
+  return R->getValueType();
+}
+
+QualType SymbolExtent::getType() const {
+  ASTContext &Ctx = R->getMemRegionManager()->getContext();
+  return Ctx.getSizeType();
+}
+
+QualType SymbolMetadata::getType() const {
+  return T;
+}
+
+QualType SymbolRegionValue::getType() const {
+  return R->getValueType();
+}
+
+SymbolManager::~SymbolManager() {
+  for (SymbolDependTy::const_iterator I = SymbolDependencies.begin(),
+       E = SymbolDependencies.end(); I != E; ++I) {
+    delete I->second;
+  }
+
+}
+
+bool SymbolManager::canSymbolicate(QualType T) {
+  T = T.getCanonicalType();
+
+  if (Loc::isLocType(T))
+    return true;
+
+  if (T->isIntegralOrEnumerationType())
+    return true;
+
+  if (T->isRecordType() && !T->isUnionType())
+    return true;
+
+  return false;
+}
+
+void SymbolManager::addSymbolDependency(const SymbolRef Primary,
+                                        const SymbolRef Dependent) {
+  SymbolDependTy::iterator I = SymbolDependencies.find(Primary);
+  SymbolRefSmallVectorTy *dependencies = 0;
+  if (I == SymbolDependencies.end()) {
+    dependencies = new SymbolRefSmallVectorTy();
+    SymbolDependencies[Primary] = dependencies;
+  } else {
+    dependencies = I->second;
+  }
+  dependencies->push_back(Dependent);
+}
+
+const SymbolRefSmallVectorTy *SymbolManager::getDependentSymbols(
+                                                     const SymbolRef Primary) {
+  SymbolDependTy::const_iterator I = SymbolDependencies.find(Primary);
+  if (I == SymbolDependencies.end())
+    return 0;
+  return I->second;
+}
+
+void SymbolReaper::markDependentsLive(SymbolRef sym) {
+  // Do not mark dependents more than once.
+  SymbolMapTy::iterator LI = TheLiving.find(sym);
+  assert(LI != TheLiving.end() && "The primary symbol is not live.");
+  if (LI->second == HaveMarkedDependents)
+    return;
+  LI->second = HaveMarkedDependents;
+
+  if (const SymbolRefSmallVectorTy *Deps = SymMgr.getDependentSymbols(sym)) {
+    for (SymbolRefSmallVectorTy::const_iterator I = Deps->begin(),
+                                                E = Deps->end(); I != E; ++I) {
+      if (TheLiving.find(*I) != TheLiving.end())
+        continue;
+      markLive(*I);
+    }
+  }
+}
+
+void SymbolReaper::markLive(SymbolRef sym) {
+  TheLiving[sym] = NotProcessed;
+  TheDead.erase(sym);
+  markDependentsLive(sym);
+}
+
+void SymbolReaper::markLive(const MemRegion *region) {
+  RegionRoots.insert(region);
+}
+
+void SymbolReaper::markInUse(SymbolRef sym) {
+  if (isa<SymbolMetadata>(sym))
+    MetadataInUse.insert(sym);
+}
+
+bool SymbolReaper::maybeDead(SymbolRef sym) {
+  if (isLive(sym))
+    return false;
+
+  TheDead.insert(sym);
+  return true;
+}
+
+bool SymbolReaper::isLiveRegion(const MemRegion *MR) {
+  if (RegionRoots.count(MR))
+    return true;
+  
+  MR = MR->getBaseRegion();
+
+  if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
+    return isLive(SR->getSymbol());
+
+  if (const VarRegion *VR = dyn_cast<VarRegion>(MR))
+    return isLive(VR, true);
+
+  // FIXME: This is a gross over-approximation. What we really need is a way to
+  // tell if anything still refers to this region. Unlike SymbolicRegions,
+  // AllocaRegions don't have associated symbols, though, so we don't actually
+  // have a way to track their liveness.
+  if (isa<AllocaRegion>(MR))
+    return true;
+
+  if (isa<CXXThisRegion>(MR))
+    return true;
+
+  if (isa<MemSpaceRegion>(MR))
+    return true;
+
+  return false;
+}
+
+bool SymbolReaper::isLive(SymbolRef sym) {
+  if (TheLiving.count(sym)) {
+    markDependentsLive(sym);
+    return true;
+  }
+  
+  bool KnownLive;
+  
+  switch (sym->getKind()) {
+  case SymExpr::RegionValueKind:
+    KnownLive = isLiveRegion(cast<SymbolRegionValue>(sym)->getRegion());
+    break;
+  case SymExpr::ConjuredKind:
+    KnownLive = false;
+    break;
+  case SymExpr::DerivedKind:
+    KnownLive = isLive(cast<SymbolDerived>(sym)->getParentSymbol());
+    break;
+  case SymExpr::ExtentKind:
+    KnownLive = isLiveRegion(cast<SymbolExtent>(sym)->getRegion());
+    break;
+  case SymExpr::MetadataKind:
+    KnownLive = MetadataInUse.count(sym) &&
+                isLiveRegion(cast<SymbolMetadata>(sym)->getRegion());
+    if (KnownLive)
+      MetadataInUse.erase(sym);
+    break;
+  case SymExpr::SymIntKind:
+    KnownLive = isLive(cast<SymIntExpr>(sym)->getLHS());
+    break;
+  case SymExpr::IntSymKind:
+    KnownLive = isLive(cast<IntSymExpr>(sym)->getRHS());
+    break;
+  case SymExpr::SymSymKind:
+    KnownLive = isLive(cast<SymSymExpr>(sym)->getLHS()) &&
+                isLive(cast<SymSymExpr>(sym)->getRHS());
+    break;
+  case SymExpr::CastSymbolKind:
+    KnownLive = isLive(cast<SymbolCast>(sym)->getOperand());
+    break;
+  }
+
+  if (KnownLive)
+    markLive(sym);
+
+  return KnownLive;
+}
+
+bool
+SymbolReaper::isLive(const Stmt *ExprVal, const LocationContext *ELCtx) const {
+  if (LCtx == 0)
+    return false;
+
+  if (LCtx != ELCtx) {
+    // If the reaper's location context is a parent of the expression's
+    // location context, then the expression value is now "out of scope".
+    if (LCtx->isParentOf(ELCtx))
+      return false;
+    return true;
+  }
+
+  // If no statement is provided, everything in this and parent contexts is live.
+  if (!Loc)
+    return true;
+
+  return LCtx->getAnalysis<RelaxedLiveVariables>()->isLive(Loc, ExprVal);
+}
+
+bool SymbolReaper::isLive(const VarRegion *VR, bool includeStoreBindings) const{
+  const StackFrameContext *VarContext = VR->getStackFrame();
+
+  if (!VarContext)
+    return true;
+
+  if (!LCtx)
+    return false;
+  const StackFrameContext *CurrentContext = LCtx->getCurrentStackFrame();
+
+  if (VarContext == CurrentContext) {
+    // If no statement is provided, everything is live.
+    if (!Loc)
+      return true;
+
+    if (LCtx->getAnalysis<RelaxedLiveVariables>()->isLive(Loc, VR->getDecl()))
+      return true;
+
+    if (!includeStoreBindings)
+      return false;
+    
+    unsigned &cachedQuery =
+      const_cast<SymbolReaper*>(this)->includedRegionCache[VR];
+
+    if (cachedQuery) {
+      return cachedQuery == 1;
+    }
+
+    // Query the store to see if the region occurs in any live bindings.
+    if (Store store = reapedStore.getStore()) {
+      bool hasRegion = 
+        reapedStore.getStoreManager().includedInBindings(store, VR);
+      cachedQuery = hasRegion ? 1 : 2;
+      return hasRegion;
+    }
+    
+    return false;
+  }
+
+  return VarContext->isParentOf(CurrentContext);
+}
+
+SymbolVisitor::~SymbolVisitor() {}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
new file mode 100644
index 0000000..d5706d6
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
@@ -0,0 +1,72 @@
+//===--- TextPathDiagnostics.cpp - Text Diagnostics for Paths ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the TextPathDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace ento;
+using namespace llvm;
+
+namespace {
+
+/// \brief Simple path diagnostic client used for outputting the sequence of
+/// events as diagnostic notes.
+class TextPathDiagnostics : public PathDiagnosticConsumer {
+  const std::string OutputFile;
+  DiagnosticsEngine &Diag;
+
+public:
+  TextPathDiagnostics(const std::string& output, DiagnosticsEngine &diag)
+    : OutputFile(output), Diag(diag) {}
+
+  void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+                            FilesMade *filesMade);
+  
+  virtual StringRef getName() const {
+    return "TextPathDiagnostics";
+  }
+
+  PathGenerationScheme getGenerationScheme() const { return Minimal; }
+  bool supportsLogicalOpControlFlow() const { return true; }
+  bool supportsAllBlockEdges() const { return true; }
+  virtual bool supportsCrossFileDiagnostics() const { return true; }
+};
+
+} // end anonymous namespace
+
+void ento::createTextPathDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+                                            PathDiagnosticConsumers &C,
+                                            const std::string& out,
+                                            const Preprocessor &PP) {
+  C.push_back(new TextPathDiagnostics(out, PP.getDiagnostics()));
+}
+
+void TextPathDiagnostics::FlushDiagnosticsImpl(
+                              std::vector<const PathDiagnostic *> &Diags,
+                              FilesMade *) {
+  for (std::vector<const PathDiagnostic *>::iterator it = Diags.begin(),
+       et = Diags.end(); it != et; ++it) {
+    const PathDiagnostic *D = *it;
+
+    PathPieces FlatPath = D->path.flatten(/*ShouldFlattenMacros=*/true);
+    for (PathPieces::const_iterator I = FlatPath.begin(), E = FlatPath.end(); 
+         I != E; ++I) {
+      unsigned diagID =
+        Diag.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Note,
+                                                 (*I)->getString());
+      Diag.Report((*I)->getLocation().asLocation(), diagID);
+    }
+  }
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
new file mode 100644
index 0000000..d71e528
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -0,0 +1,784 @@
+//===--- AnalysisConsumer.cpp - ASTConsumer for running Analyses ----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// "Meta" ASTConsumer for running different source analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "AnalysisConsumer"
+
+#include "AnalysisConsumer.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/CallGraph.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <queue>
+
+using namespace clang;
+using namespace ento;
+using llvm::SmallPtrSet;
+
+static ExplodedNode::Auditor* CreateUbiViz();
+
+STATISTIC(NumFunctionTopLevel, "The # of functions at top level.");
+STATISTIC(NumFunctionsAnalyzed,
+                      "The # of functions and blocks analyzed (as top level "
+                      "with inlining turned on).");
+STATISTIC(NumBlocksInAnalyzedFunctions,
+                      "The # of basic blocks in the analyzed functions.");
+STATISTIC(PercentReachableBlocks, "The % of reachable basic blocks.");
+STATISTIC(MaxCFGSize, "The maximum number of basic blocks in a function.");
+
+//===----------------------------------------------------------------------===//
+// Special PathDiagnosticConsumers.
+//===----------------------------------------------------------------------===//
+
+static void createPlistHTMLDiagnosticConsumer(AnalyzerOptions &AnalyzerOpts,
+                                              PathDiagnosticConsumers &C,
+                                              const std::string &prefix,
+                                              const Preprocessor &PP) {
+  createHTMLDiagnosticConsumer(AnalyzerOpts, C,
+                               llvm::sys::path::parent_path(prefix), PP);
+  createPlistDiagnosticConsumer(AnalyzerOpts, C, prefix, PP);
+}
+
+namespace {
+class ClangDiagPathDiagConsumer : public PathDiagnosticConsumer {
+  DiagnosticsEngine &Diag;
+public:
+  ClangDiagPathDiagConsumer(DiagnosticsEngine &Diag) : Diag(Diag) {}
+  virtual ~ClangDiagPathDiagConsumer() {}
+  virtual StringRef getName() const { return "ClangDiags"; }
+  virtual PathGenerationScheme getGenerationScheme() const { return None; }
+
+  void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+                            FilesMade *filesMade) {
+    for (std::vector<const PathDiagnostic*>::iterator I = Diags.begin(),
+         E = Diags.end(); I != E; ++I) {
+      const PathDiagnostic *PD = *I;
+      StringRef desc = PD->getShortDescription();
+      SmallString<512> TmpStr;
+      llvm::raw_svector_ostream Out(TmpStr);
+      for (StringRef::iterator I=desc.begin(), E=desc.end(); I!=E; ++I) {
+        if (*I == '%')
+          Out << "%%";
+        else
+          Out << *I;
+      }
+      Out.flush();
+      unsigned ErrorDiag = Diag.getCustomDiagID(DiagnosticsEngine::Warning,
+                                                TmpStr);
+      SourceLocation L = PD->getLocation().asLocation();
+      DiagnosticBuilder diagBuilder = Diag.Report(L, ErrorDiag);
+
+      // Get the ranges from the last point in the path.
+      ArrayRef<SourceRange> Ranges = PD->path.back()->getRanges();
+
+      for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
+                                           E = Ranges.end(); I != E; ++I) {
+        diagBuilder << *I;
+      }
+    }
+  }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// AnalysisConsumer declaration.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class AnalysisConsumer : public ASTConsumer,
+                         public RecursiveASTVisitor<AnalysisConsumer> {
+  enum {
+    AM_None = 0,
+    AM_Syntax = 0x1,
+    AM_Path = 0x2
+  };
+  typedef unsigned AnalysisMode;
+
+  /// Mode of the analyses while recursively visiting Decls.
+  AnalysisMode RecVisitorMode;
+  /// Bug Reporter to use while recursively visiting Decls.
+  BugReporter *RecVisitorBR;
+
+public:
+  ASTContext *Ctx;
+  const Preprocessor &PP;
+  const std::string OutDir;
+  AnalyzerOptionsRef Opts;
+  ArrayRef<std::string> Plugins;
+
+  /// \brief Stores the declarations from the local translation unit.
+  /// Note that we pre-compute the local declarations at parse time as an
+  /// optimization to make sure we do not deserialize everything from disk.
+  /// The ratio of local declarations to all declarations might be very small
+  /// when working with a PCH file.
+  SetOfDecls LocalTUDecls;
+                           
+  // Set of PathDiagnosticConsumers.  Owned by AnalysisManager.
+  PathDiagnosticConsumers PathConsumers;
+
+  StoreManagerCreator CreateStoreMgr;
+  ConstraintManagerCreator CreateConstraintMgr;
+
+  OwningPtr<CheckerManager> checkerMgr;
+  OwningPtr<AnalysisManager> Mgr;
+
+  /// Times the analysis of each translation unit.
+  static llvm::Timer* TUTotalTimer;
+
+  /// The information about analyzed functions shared throughout the
+  /// translation unit.
+  FunctionSummariesTy FunctionSummaries;
+
+  AnalysisConsumer(const Preprocessor& pp,
+                   const std::string& outdir,
+                   AnalyzerOptionsRef opts,
+                   ArrayRef<std::string> plugins)
+    : RecVisitorMode(0), RecVisitorBR(0),
+      Ctx(0), PP(pp), OutDir(outdir), Opts(opts), Plugins(plugins) {
+    DigestAnalyzerOptions();
+    if (Opts->PrintStats) {
+      llvm::EnableStatistics();
+      TUTotalTimer = new llvm::Timer("Analyzer Total Time");
+    }
+  }
+
+  ~AnalysisConsumer() {
+    if (Opts->PrintStats)
+      delete TUTotalTimer;
+  }
+
+  void DigestAnalyzerOptions() {
+    // Create the PathDiagnosticConsumer.
+    PathConsumers.push_back(new ClangDiagPathDiagConsumer(PP.getDiagnostics()));
+
+    if (!OutDir.empty()) {
+      switch (Opts->AnalysisDiagOpt) {
+      default:
+#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN, AUTOCREATE) \
+        case PD_##NAME: CREATEFN(*Opts.getPtr(), PathConsumers, OutDir, PP);\
+        break;
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+      }
+    } else if (Opts->AnalysisDiagOpt == PD_TEXT) {
+      // Create the text client even without a specified output file since
+      // it just uses diagnostic notes.
+      createTextPathDiagnosticConsumer(*Opts.getPtr(), PathConsumers, "", PP);
+    }
+
+    // Create the analyzer component creators.
+    switch (Opts->AnalysisStoreOpt) {
+    default:
+      llvm_unreachable("Unknown store manager.");
+#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATEFN)           \
+      case NAME##Model: CreateStoreMgr = CREATEFN; break;
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+    }
+
+    switch (Opts->AnalysisConstraintsOpt) {
+    default:
+      llvm_unreachable("Unknown constraint manager.");
+#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATEFN)     \
+      case NAME##Model: CreateConstraintMgr = CREATEFN; break;
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+    }
+  }
+
+  void DisplayFunction(const Decl *D, AnalysisMode Mode,
+                       ExprEngine::InliningModes IMode) {
+    if (!Opts->AnalyzerDisplayProgress)
+      return;
+
+    SourceManager &SM = Mgr->getASTContext().getSourceManager();
+    PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
+    if (Loc.isValid()) {
+      llvm::errs() << "ANALYZE";
+
+      if (Mode == AM_Syntax)
+        llvm::errs() << " (Syntax)";
+      else if (Mode == AM_Path) {
+        llvm::errs() << " (Path, ";
+        switch (IMode) {
+          case ExprEngine::Inline_Minimal:
+            llvm::errs() << " Inline_Minimal";
+            break;
+          case ExprEngine::Inline_Regular:
+            llvm::errs() << " Inline_Regular";
+            break;
+        }
+        llvm::errs() << ")";
+      }
+      else
+        assert(Mode == (AM_Syntax | AM_Path) && "Unexpected mode!");
+
+      llvm::errs() << ": " << Loc.getFilename();
+      if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
+        const NamedDecl *ND = cast<NamedDecl>(D);
+        llvm::errs() << ' ' << *ND << '\n';
+      }
+      else if (isa<BlockDecl>(D)) {
+        llvm::errs() << ' ' << "block(line:" << Loc.getLine() << ",col:"
+                     << Loc.getColumn() << ")\n";
+      }
+      else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+        Selector S = MD->getSelector();
+        llvm::errs() << ' ' << S.getAsString();
+      }
+    }
+  }
+
+  virtual void Initialize(ASTContext &Context) {
+    Ctx = &Context;
+    checkerMgr.reset(createCheckerManager(*Opts, PP.getLangOpts(), Plugins,
+                                          PP.getDiagnostics()));
+    Mgr.reset(new AnalysisManager(*Ctx,
+                                  PP.getDiagnostics(),
+                                  PP.getLangOpts(),
+                                  PathConsumers,
+                                  CreateStoreMgr,
+                                  CreateConstraintMgr,
+                                  checkerMgr.get(),
+                                  *Opts));
+  }
+
+  /// \brief Store the top level decls in the set to be processed later on.
+  /// (Doing this pre-processing avoids deserialization of data from PCH.)
+  virtual bool HandleTopLevelDecl(DeclGroupRef D);
+  virtual void HandleTopLevelDeclInObjCContainer(DeclGroupRef D);
+
+  virtual void HandleTranslationUnit(ASTContext &C);
+
+  /// \brief Determine which inlining mode should be used when this function is
+  /// analyzed. This allows the default inlining policies to be redefined
+  /// when analyzing a given function.
+  ExprEngine::InliningModes
+  getInliningModeForFunction(const Decl *D, SetOfConstDecls Visited);
+
+  /// \brief Build the call graph for all the top level decls of this TU and
+  /// use it to define the order in which the functions should be visited.
+  void HandleDeclsCallGraph(const unsigned LocalTUDeclsSize);
+
+  /// \brief Run analyses (syntax or path sensitive) on the given function.
+  /// \param Mode - determines if we are requesting syntax only or path
+  /// sensitive only analysis.
+  /// \param VisitedCallees - The output parameter, which is populated with the
+  /// set of functions which should be considered analyzed after analyzing the
+  /// given root function.
+  void HandleCode(Decl *D, AnalysisMode Mode,
+                  ExprEngine::InliningModes IMode = ExprEngine::Inline_Minimal,
+                  SetOfConstDecls *VisitedCallees = 0);
+
+  void RunPathSensitiveChecks(Decl *D,
+                              ExprEngine::InliningModes IMode,
+                              SetOfConstDecls *VisitedCallees);
+  void ActionExprEngine(Decl *D, bool ObjCGCEnabled,
+                        ExprEngine::InliningModes IMode,
+                        SetOfConstDecls *VisitedCallees);
+
+  /// Visitors for the RecursiveASTVisitor.
+  bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+  /// Handle callbacks for arbitrary Decls.
+  bool VisitDecl(Decl *D) {
+    AnalysisMode Mode = getModeForDecl(D, RecVisitorMode);
+    if (Mode & AM_Syntax)
+      checkerMgr->runCheckersOnASTDecl(D, *Mgr, *RecVisitorBR);
+    return true;
+  }
+
+  bool VisitFunctionDecl(FunctionDecl *FD) {
+    IdentifierInfo *II = FD->getIdentifier();
+    if (II && II->getName().startswith("__inline"))
+      return true;
+
+    // We skip function template definitions, as their semantics is
+    // only determined when they are instantiated.
+    if (FD->isThisDeclarationADefinition() &&
+        !FD->isDependentContext()) {
+      assert(RecVisitorMode == AM_Syntax || Mgr->shouldInlineCall() == false);
+      HandleCode(FD, RecVisitorMode);
+    }
+    return true;
+  }
+
+  bool VisitObjCMethodDecl(ObjCMethodDecl *MD) {
+    if (MD->isThisDeclarationADefinition()) {
+      assert(RecVisitorMode == AM_Syntax || Mgr->shouldInlineCall() == false);
+      HandleCode(MD, RecVisitorMode);
+    }
+    return true;
+  }
+  
+  bool VisitBlockDecl(BlockDecl *BD) {
+    if (BD->hasBody()) {
+      assert(RecVisitorMode == AM_Syntax || Mgr->shouldInlineCall() == false);
+      HandleCode(BD, RecVisitorMode);
+    }
+    return true;
+  }
+
+private:
+  void storeTopLevelDecls(DeclGroupRef DG);
+
+  /// \brief Check if we should skip (not analyze) the given function.
+  AnalysisMode getModeForDecl(Decl *D, AnalysisMode Mode);
+
+};
+} // end anonymous namespace
+
+
+//===----------------------------------------------------------------------===//
+// AnalysisConsumer implementation.
+//===----------------------------------------------------------------------===//
+llvm::Timer* AnalysisConsumer::TUTotalTimer = 0;
+
+bool AnalysisConsumer::HandleTopLevelDecl(DeclGroupRef DG) {
+  storeTopLevelDecls(DG);
+  return true;
+}
+
+void AnalysisConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef DG) {
+  storeTopLevelDecls(DG);
+}
+
+void AnalysisConsumer::storeTopLevelDecls(DeclGroupRef DG) {
+  for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I) {
+
+    // Skip ObjCMethodDecls; wait for the ObjC container to avoid
+    // analyzing them twice.
+    if (isa<ObjCMethodDecl>(*I))
+      continue;
+
+    LocalTUDecls.push_back(*I);
+  }
+}
+
+static bool shouldSkipFunction(const Decl *D,
+                               SetOfConstDecls Visited,
+                               SetOfConstDecls VisitedAsTopLevel) {
+  if (VisitedAsTopLevel.count(D))
+    return true;
+
+  // We want to re-analyze the functions as top level in the following cases:
+  // - The 'init' methods should be reanalyzed because
+  //   ObjCNonNilReturnValueChecker assumes that '[super init]' never returns
+  //   'nil' and unless we analyze the 'init' functions as top level, we will
+  //   not catch errors within defensive code.
+  // - We want to reanalyze all ObjC methods as top level to report Retain
+  //   Count naming convention errors more aggressively.
+  if (isa<ObjCMethodDecl>(D))
+    return false;
+
+  // Otherwise, if we visited the function before, do not reanalyze it.
+  return Visited.count(D);
+}
+
+ExprEngine::InliningModes
+AnalysisConsumer::getInliningModeForFunction(const Decl *D,
+                                             SetOfConstDecls Visited) {
+  // We want to reanalyze all ObjC methods as top level to report Retain
+  // Count naming convention errors more aggressively. But we should tune down
+  // inlining when reanalyzing an already inlined function.
+  if (Visited.count(D)) {
+    assert(isa<ObjCMethodDecl>(D) &&
+           "We are only reanalyzing ObjCMethods.");
+    const ObjCMethodDecl *ObjCM = cast<ObjCMethodDecl>(D);
+    if (ObjCM->getMethodFamily() != OMF_init)
+      return ExprEngine::Inline_Minimal;
+  }
+
+  return ExprEngine::Inline_Regular;
+}
+
+void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
+  // Build the Call Graph by adding all the top level declarations to the graph.
+  // Note: CallGraph can trigger deserialization of more items from a pch
+  // (through HandleInterestingDecl), triggering additions to LocalTUDecls.
+  // We rely on random access to add the initially processed Decls to CG.
+  CallGraph CG;
+  for (unsigned i = 0 ; i < LocalTUDeclsSize ; ++i) {
+    CG.addToCallGraph(LocalTUDecls[i]);
+  }
+
+  // Walk over all of the call graph nodes in topological order, so that we
+  // analyze parents before the children. Skip the functions inlined into
+  // the previously processed functions. Use external Visited set to identify
+  // inlined functions. The topological order allows the "do not reanalyze
+  // previously inlined function" performance heuristic to be triggered more
+  // often.
+  SetOfConstDecls Visited;
+  SetOfConstDecls VisitedAsTopLevel;
+  llvm::ReversePostOrderTraversal<clang::CallGraph*> RPOT(&CG);
+  for (llvm::ReversePostOrderTraversal<clang::CallGraph*>::rpo_iterator
+         I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
+    NumFunctionTopLevel++;
+
+    CallGraphNode *N = *I;
+    Decl *D = N->getDecl();
+    
+    // Skip the abstract root node.
+    if (!D)
+      continue;
+
+    // Skip the functions which have been processed already or previously
+    // inlined.
+    if (shouldSkipFunction(D, Visited, VisitedAsTopLevel))
+      continue;
+
+    // Analyze the function.
+    SetOfConstDecls VisitedCallees;
+
+    HandleCode(D, AM_Path, getInliningModeForFunction(D, Visited),
+               (Mgr->options.InliningMode == All ? 0 : &VisitedCallees));
+
+    // Add the visited callees to the global visited set.
+    for (SetOfConstDecls::iterator I = VisitedCallees.begin(),
+                                   E = VisitedCallees.end(); I != E; ++I) {
+        Visited.insert(*I);
+    }
+    VisitedAsTopLevel.insert(D);
+  }
+}
+
+void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
+  // Don't run the actions if an error has occurred with parsing the file.
+  DiagnosticsEngine &Diags = PP.getDiagnostics();
+  if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred())
+    return;
+
+  {
+    if (TUTotalTimer) TUTotalTimer->startTimer();
+
+    // Introduce a scope to destroy BR before Mgr.
+    BugReporter BR(*Mgr);
+    TranslationUnitDecl *TU = C.getTranslationUnitDecl();
+    checkerMgr->runCheckersOnASTDecl(TU, *Mgr, BR);
+
+    // Run the AST-only checks using the order in which functions are defined.
+    // If inlining is not turned on, use the simplest function order for path
+    // sensitive analyses as well.
+    RecVisitorMode = AM_Syntax;
+    if (!Mgr->shouldInlineCall())
+      RecVisitorMode |= AM_Path;
+    RecVisitorBR = &BR;
+
+    // Process all the top level declarations.
+    //
+    // Note: TraverseDecl may modify LocalTUDecls, but only by appending more
+    // entries.  Thus we don't use an iterator, but rely on LocalTUDecls
+    // random access.  By doing so, we automatically compensate for iterators
+    // possibly being invalidated, although this is a bit slower.
+    const unsigned LocalTUDeclsSize = LocalTUDecls.size();
+    for (unsigned i = 0 ; i < LocalTUDeclsSize ; ++i) {
+      TraverseDecl(LocalTUDecls[i]);
+    }
+
+    if (Mgr->shouldInlineCall())
+      HandleDeclsCallGraph(LocalTUDeclsSize);
+
+    // After all decls handled, run checkers on the entire TranslationUnit.
+    checkerMgr->runCheckersOnEndOfTranslationUnit(TU, *Mgr, BR);
+
+    RecVisitorBR = 0;
+  }
+
+  // Explicitly destroy the PathDiagnosticConsumer.  This will flush its output.
+  // FIXME: This should be replaced with something that doesn't rely on
+  // side-effects in PathDiagnosticConsumer's destructor. This is required when
+  // used with option -disable-free.
+  Mgr.reset(NULL);
+
+  if (TUTotalTimer) TUTotalTimer->stopTimer();
+
+  // Count how many basic blocks we have not covered.
+  NumBlocksInAnalyzedFunctions = FunctionSummaries.getTotalNumBasicBlocks();
+  if (NumBlocksInAnalyzedFunctions > 0)
+    PercentReachableBlocks =
+      (FunctionSummaries.getTotalNumVisitedBasicBlocks() * 100) /
+        NumBlocksInAnalyzedFunctions;
+
+}
+
+static std::string getFunctionName(const Decl *D) {
+  if (const ObjCMethodDecl *ID = dyn_cast<ObjCMethodDecl>(D)) {
+    return ID->getSelector().getAsString();
+  }
+  if (const FunctionDecl *ND = dyn_cast<FunctionDecl>(D)) {
+    IdentifierInfo *II = ND->getIdentifier();
+    if (II)
+      return II->getName();
+  }
+  return "";
+}
+
+AnalysisConsumer::AnalysisMode
+AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) {
+  if (!Opts->AnalyzeSpecificFunction.empty() &&
+      getFunctionName(D) != Opts->AnalyzeSpecificFunction)
+    return AM_None;
+
+  // Unless -analyze-all is specified, treat decls differently depending on
+  // where they came from:
+  // - Main source file: run both path-sensitive and non-path-sensitive checks.
+  // - Header files: run non-path-sensitive checks only.
+  // - System headers: don't run any checks.
+  SourceManager &SM = Ctx->getSourceManager();
+  SourceLocation SL = SM.getExpansionLoc(D->getLocation());
+  if (!Opts->AnalyzeAll && !SM.isFromMainFile(SL)) {
+    if (SL.isInvalid() || SM.isInSystemHeader(SL))
+      return AM_None;
+    return Mode & ~AM_Path;
+  }
+
+  return Mode;
+}
+
+void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
+                                  ExprEngine::InliningModes IMode,
+                                  SetOfConstDecls *VisitedCallees) {
+  if (!D->hasBody())
+    return;
+  Mode = getModeForDecl(D, Mode);
+  if (Mode == AM_None)
+    return;
+
+  DisplayFunction(D, Mode, IMode);
+  CFG *DeclCFG = Mgr->getCFG(D);
+  if (DeclCFG) {
+    unsigned CFGSize = DeclCFG->size();
+    MaxCFGSize = MaxCFGSize < CFGSize ? CFGSize : MaxCFGSize;
+  }
+
+  // Clear the AnalysisManager of old AnalysisDeclContexts.
+  Mgr->ClearContexts();
+  BugReporter BR(*Mgr);
+
+  if (Mode & AM_Syntax)
+    checkerMgr->runCheckersOnASTBody(D, *Mgr, BR);
+  if ((Mode & AM_Path) && checkerMgr->hasPathSensitiveCheckers()) {
+    RunPathSensitiveChecks(D, IMode, VisitedCallees);
+    if (IMode != ExprEngine::Inline_Minimal)
+      NumFunctionsAnalyzed++;
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Path-sensitive checking.
+//===----------------------------------------------------------------------===//
+
+void AnalysisConsumer::ActionExprEngine(Decl *D, bool ObjCGCEnabled,
+                                        ExprEngine::InliningModes IMode,
+                                        SetOfConstDecls *VisitedCallees) {
+  // Construct the analysis engine.  First check if the CFG is valid.
+  // FIXME: Inter-procedural analysis will need to handle invalid CFGs.
+  if (!Mgr->getCFG(D))
+    return;
+
+  // See if the LiveVariables analysis scales.
+  if (!Mgr->getAnalysisDeclContext(D)->getAnalysis<RelaxedLiveVariables>())
+    return;
+
+  ExprEngine Eng(*Mgr, ObjCGCEnabled, VisitedCallees, &FunctionSummaries,IMode);
+
+  // Set the graph auditor.
+  OwningPtr<ExplodedNode::Auditor> Auditor;
+  if (Mgr->options.visualizeExplodedGraphWithUbiGraph) {
+    Auditor.reset(CreateUbiViz());
+    ExplodedNode::SetAuditor(Auditor.get());
+  }
+
+  // Execute the worklist algorithm.
+  Eng.ExecuteWorkList(Mgr->getAnalysisDeclContextManager().getStackFrame(D),
+                      Mgr->options.getMaxNodesPerTopLevelFunction());
+
+  // Release the auditor (if any) so that it doesn't monitor the graph
+  // created by BugReporter.
+  ExplodedNode::SetAuditor(0);
+
+  // Visualize the exploded graph.
+  if (Mgr->options.visualizeExplodedGraphWithGraphViz)
+    Eng.ViewGraph(Mgr->options.TrimGraph);
+
+  // Display warnings.
+  Eng.getBugReporter().FlushReports();
+}
+
+void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
+                                              ExprEngine::InliningModes IMode,
+                                              SetOfConstDecls *Visited) {
+
+  switch (Mgr->getLangOpts().getGC()) {
+  case LangOptions::NonGC:
+    ActionExprEngine(D, false, IMode, Visited);
+    break;
+  
+  case LangOptions::GCOnly:
+    ActionExprEngine(D, true, IMode, Visited);
+    break;
+  
+  case LangOptions::HybridGC:
+    ActionExprEngine(D, false, IMode, Visited);
+    ActionExprEngine(D, true, IMode, Visited);
+    break;
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// AnalysisConsumer creation.
+//===----------------------------------------------------------------------===//
+
+ASTConsumer* ento::CreateAnalysisConsumer(const Preprocessor& pp,
+                                          const std::string& outDir,
+                                          AnalyzerOptionsRef opts,
+                                          ArrayRef<std::string> plugins) {
+  // Disable the effects of '-Werror' when using the AnalysisConsumer.
+  pp.getDiagnostics().setWarningsAsErrors(false);
+
+  return new AnalysisConsumer(pp, outDir, opts, plugins);
+}
+
+//===----------------------------------------------------------------------===//
+// Ubigraph Visualization.  FIXME: Move to separate file.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class UbigraphViz : public ExplodedNode::Auditor {
+  OwningPtr<raw_ostream> Out;
+  llvm::sys::Path Dir, Filename;
+  unsigned Cntr;
+
+  typedef llvm::DenseMap<void*,unsigned> VMap;
+  VMap M;
+
+public:
+  UbigraphViz(raw_ostream *out, llvm::sys::Path& dir,
+              llvm::sys::Path& filename);
+
+  ~UbigraphViz();
+
+  virtual void AddEdge(ExplodedNode *Src, ExplodedNode *Dst);
+};
+
+} // end anonymous namespace
+
+static ExplodedNode::Auditor* CreateUbiViz() {
+  std::string ErrMsg;
+
+  llvm::sys::Path Dir = llvm::sys::Path::GetTemporaryDirectory(&ErrMsg);
+  if (!ErrMsg.empty())
+    return 0;
+
+  llvm::sys::Path Filename = Dir;
+  Filename.appendComponent("llvm_ubi");
+  Filename.makeUnique(true,&ErrMsg);
+
+  if (!ErrMsg.empty())
+    return 0;
+
+  llvm::errs() << "Writing '" << Filename.str() << "'.\n";
+
+  OwningPtr<llvm::raw_fd_ostream> Stream;
+  Stream.reset(new llvm::raw_fd_ostream(Filename.c_str(), ErrMsg));
+
+  if (!ErrMsg.empty())
+    return 0;
+
+  return new UbigraphViz(Stream.take(), Dir, Filename);
+}
+
+void UbigraphViz::AddEdge(ExplodedNode *Src, ExplodedNode *Dst) {
+
+  assert (Src != Dst && "Self-edges are not allowed.");
+
+  // Lookup the Src.  If it is a new node, it's a root.
+  VMap::iterator SrcI= M.find(Src);
+  unsigned SrcID;
+
+  if (SrcI == M.end()) {
+    M[Src] = SrcID = Cntr++;
+    *Out << "('vertex', " << SrcID << ", ('color','#00ff00'))\n";
+  }
+  else
+    SrcID = SrcI->second;
+
+  // Lookup the Dst.
+  VMap::iterator DstI= M.find(Dst);
+  unsigned DstID;
+
+  if (DstI == M.end()) {
+    M[Dst] = DstID = Cntr++;
+    *Out << "('vertex', " << DstID << ")\n";
+  }
+  else {
+    // We have hit DstID before.  Change its style to reflect a cache hit.
+    DstID = DstI->second;
+    *Out << "('change_vertex_style', " << DstID << ", 1)\n";
+  }
+
+  // Add the edge.
+  *Out << "('edge', " << SrcID << ", " << DstID
+       << ", ('arrow','true'), ('oriented', 'true'))\n";
+}
+
+UbigraphViz::UbigraphViz(raw_ostream *out, llvm::sys::Path& dir,
+                         llvm::sys::Path& filename)
+  : Out(out), Dir(dir), Filename(filename), Cntr(0) {
+
+  *Out << "('vertex_style_attribute', 0, ('shape', 'icosahedron'))\n";
+  *Out << "('vertex_style', 1, 0, ('shape', 'sphere'), ('color', '#ffcc66'),"
+          " ('size', '1.5'))\n";
+}
+
+UbigraphViz::~UbigraphViz() {
+  Out.reset(0);
+  llvm::errs() << "Running 'ubiviz' program... ";
+  std::string ErrMsg;
+  llvm::sys::Path Ubiviz = llvm::sys::Program::FindProgramByName("ubiviz");
+  std::vector<const char*> args;
+  args.push_back(Ubiviz.c_str());
+  args.push_back(Filename.c_str());
+  args.push_back(0);
+
+  if (llvm::sys::Program::ExecuteAndWait(Ubiviz, &args[0],0,0,0,0,&ErrMsg)) {
+    llvm::errs() << "Error viewing graph: " << ErrMsg << "\n";
+  }
+
+  // Delete the directory.
+  Dir.eraseFromDisk(true);
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h b/safecode/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h
new file mode 100644
index 0000000..b75220b
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h
@@ -0,0 +1,43 @@
+//===--- AnalysisConsumer.h - Front-end Analysis Engine Hooks ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header contains the functions necessary for a front-end to run various
+// analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_ANALYSISCONSUMER_H
+#define LLVM_CLANG_GR_ANALYSISCONSUMER_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include <string>
+
+namespace clang {
+
+class ASTConsumer;
+class Preprocessor;
+class DiagnosticsEngine;
+
+namespace ento {
+class CheckerManager;
+
+/// CreateAnalysisConsumer - Creates an ASTConsumer to run various code
+/// analysis passes.  (The set of analyses run is controlled by command-line
+/// options.)
+ASTConsumer* CreateAnalysisConsumer(const Preprocessor &pp,
+                                    const std::string &output,
+                                    AnalyzerOptionsRef opts,
+                                    ArrayRef<std::string> plugins);
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Frontend/CMakeLists.txt b/safecode/tools/clang/lib/StaticAnalyzer/Frontend/CMakeLists.txt
new file mode 100644
index 0000000..aafb249
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Frontend/CMakeLists.txt
@@ -0,0 +1,31 @@
+set(LLVM_NO_RTTI 1)
+
+include_directories( ${CMAKE_CURRENT_BINARY_DIR}/../Checkers )
+
+add_clang_library(clangStaticAnalyzerFrontend
+  AnalysisConsumer.cpp
+  CheckerRegistration.cpp
+  FrontendActions.cpp
+  )
+
+add_dependencies(clangStaticAnalyzerFrontend
+  clangStaticAnalyzerCheckers
+  clangStaticAnalyzerCore
+  ClangAttrClasses
+  ClangAttrList
+  ClangCommentNodes
+  ClangDeclNodes
+  ClangDiagnosticCommon
+  ClangDiagnosticFrontend
+  ClangStmtNodes
+  )
+
+target_link_libraries(clangStaticAnalyzerFrontend
+  clangBasic
+  clangLex
+  clangAST
+  clangFrontend
+  clangRewriteCore
+  clangRewriteFrontend
+  clangStaticAnalyzerCheckers
+  )
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
new file mode 100644
index 0000000..e7def08
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -0,0 +1,134 @@
+//===--- CheckerRegistration.cpp - Registration for the Analyzer Checkers -===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the registration function for the analyzer checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/StaticAnalyzer/Checkers/ClangCheckers.h"
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
+#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
+#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+using llvm::sys::DynamicLibrary;
+
+namespace {
+class ClangCheckerRegistry : public CheckerRegistry {
+  typedef void (*RegisterCheckersFn)(CheckerRegistry &);
+
+  static bool isCompatibleAPIVersion(const char *versionString);
+  static void warnIncompatible(DiagnosticsEngine *diags, StringRef pluginPath,
+                               const char *pluginAPIVersion);
+
+public:
+  ClangCheckerRegistry(ArrayRef<std::string> plugins,
+                       DiagnosticsEngine *diags = 0);
+};
+  
+} // end anonymous namespace
+
+ClangCheckerRegistry::ClangCheckerRegistry(ArrayRef<std::string> plugins,
+                                           DiagnosticsEngine *diags) {
+  registerBuiltinCheckers(*this);
+
+  for (ArrayRef<std::string>::iterator i = plugins.begin(), e = plugins.end();
+       i != e; ++i) {
+    // Get access to the plugin.
+    DynamicLibrary lib = DynamicLibrary::getPermanentLibrary(i->c_str());
+
+    // See if it's compatible with this build of clang.
+    const char *pluginAPIVersion =
+      (const char *) lib.getAddressOfSymbol("clang_analyzerAPIVersionString");
+    if (!isCompatibleAPIVersion(pluginAPIVersion)) {
+      warnIncompatible(diags, *i, pluginAPIVersion);
+      continue;
+    }
+
+    // Register its checkers.
+    RegisterCheckersFn registerPluginCheckers =
+      (RegisterCheckersFn) (intptr_t) lib.getAddressOfSymbol(
+                                                      "clang_registerCheckers");
+    if (registerPluginCheckers)
+      registerPluginCheckers(*this);
+  }
+}
+
+bool ClangCheckerRegistry::isCompatibleAPIVersion(const char *versionString) {
+  // If the version string is null, it's not an analyzer plugin.
+  if (versionString == 0)
+    return false;
+
+  // For now, none of the static analyzer API is considered stable.
+  // Versions must match exactly.
+  if (strcmp(versionString, CLANG_ANALYZER_API_VERSION_STRING) == 0)
+    return true;
+
+  return false;
+}
+
+void ClangCheckerRegistry::warnIncompatible(DiagnosticsEngine *diags,
+                                            StringRef pluginPath,
+                                            const char *pluginAPIVersion) {
+  if (!diags)
+    return;
+  if (!pluginAPIVersion)
+    return;
+
+  diags->Report(diag::warn_incompatible_analyzer_plugin_api)
+      << llvm::sys::path::filename(pluginPath);
+  diags->Report(diag::note_incompatible_analyzer_plugin_api)
+      << CLANG_ANALYZER_API_VERSION_STRING
+      << pluginAPIVersion;
+}
+
+
+CheckerManager *ento::createCheckerManager(AnalyzerOptions &opts,
+                                           const LangOptions &langOpts,
+                                           ArrayRef<std::string> plugins,
+                                           DiagnosticsEngine &diags) {
+  OwningPtr<CheckerManager> checkerMgr(new CheckerManager(langOpts,
+                                                          &opts));
+
+  SmallVector<CheckerOptInfo, 8> checkerOpts;
+  for (unsigned i = 0, e = opts.CheckersControlList.size(); i != e; ++i) {
+    const std::pair<std::string, bool> &opt = opts.CheckersControlList[i];
+    checkerOpts.push_back(CheckerOptInfo(opt.first.c_str(), opt.second));
+  }
+
+  ClangCheckerRegistry allCheckers(plugins, &diags);
+  allCheckers.initializeManager(*checkerMgr, checkerOpts);
+  checkerMgr->finishedCheckerRegistration();
+
+  for (unsigned i = 0, e = checkerOpts.size(); i != e; ++i) {
+    if (checkerOpts[i].isUnclaimed())
+      diags.Report(diag::err_unknown_analyzer_checker)
+          << checkerOpts[i].getName();
+  }
+
+  return checkerMgr.take();
+}
+
+void ento::printCheckerHelp(raw_ostream &out, ArrayRef<std::string> plugins) {
+  out << "OVERVIEW: Clang Static Analyzer Checkers List\n\n";
+  out << "USAGE: -analyzer-checker <CHECKER or PACKAGE,...>\n\n";
+
+  ClangCheckerRegistry(plugins).printHelp(out);
+}
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp b/safecode/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
new file mode 100644
index 0000000..13971af
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
@@ -0,0 +1,23 @@
+//===--- FrontendActions.cpp ----------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
+#include "AnalysisConsumer.h"
+#include "clang/Frontend/CompilerInstance.h"
+using namespace clang;
+using namespace ento;
+
+ASTConsumer *AnalysisAction::CreateASTConsumer(CompilerInstance &CI,
+                                               StringRef InFile) {
+  return CreateAnalysisConsumer(CI.getPreprocessor(),
+                                CI.getFrontendOpts().OutputFile,
+                                CI.getAnalyzerOpts(),
+                                CI.getFrontendOpts().Plugins);
+}
+
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Frontend/Makefile b/safecode/tools/clang/lib/StaticAnalyzer/Frontend/Makefile
new file mode 100644
index 0000000..2698120
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Frontend/Makefile
@@ -0,0 +1,19 @@
+##===- clang/lib/StaticAnalyzer/Frontend/Makefile ----------*- Makefile -*-===##
+# 
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+# 
+##===----------------------------------------------------------------------===##
+#
+# Starting point into the static analyzer land for the driver.
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../../..
+LIBRARYNAME := clangStaticAnalyzerFrontend
+
+CPP.Flags += -I${PROJ_OBJ_DIR}/../Checkers
+
+include $(CLANG_LEVEL)/Makefile
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/Makefile b/safecode/tools/clang/lib/StaticAnalyzer/Makefile
new file mode 100644
index 0000000..c166f06
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/Makefile
@@ -0,0 +1,18 @@
+##===- clang/lib/StaticAnalyzer/Makefile -------------------*- Makefile -*-===##
+# 
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+# 
+##===----------------------------------------------------------------------===##
+#
+# This implements analyses built on top of source-level CFGs. 
+#
+##===----------------------------------------------------------------------===##
+
+CLANG_LEVEL := ../..
+DIRS := Checkers Frontend
+PARALLEL_DIRS := Core
+
+include $(CLANG_LEVEL)/Makefile
diff --git a/safecode/tools/clang/lib/StaticAnalyzer/README.txt b/safecode/tools/clang/lib/StaticAnalyzer/README.txt
new file mode 100644
index 0000000..d4310c5
--- /dev/null
+++ b/safecode/tools/clang/lib/StaticAnalyzer/README.txt
@@ -0,0 +1,139 @@
+//===----------------------------------------------------------------------===//
+// Clang Static Analyzer
+//===----------------------------------------------------------------------===//
+
+= Library Structure =
+
+The analyzer library has two layers: a (low-level) static analysis
+engine (GRExprEngine.cpp and friends), and some static checkers
+(*Checker.cpp).  The latter are built on top of the former via the
+Checker and CheckerVisitor interfaces (Checker.h and
+CheckerVisitor.h).  The Checker interface is designed to be minimal
+and simple for checker writers, and attempts to isolate them from much
+of the gore of the internal analysis engine.
+
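+As a rough, illustrative sketch (not part of the upstream README; the class
+name, diagnostic strings, and the chosen check are invented for this example),
+a trivial checker built on that interface looks roughly like this:
+
+  #include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+  #include "clang/StaticAnalyzer/Core/Checker.h"
+  #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+  #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+  #include "llvm/ADT/OwningPtr.h"
+
+  using namespace clang;
+  using namespace ento;
+
+  namespace {
+  // Warns when a literal null is passed as a call argument.
+  class ExampleNullArgChecker : public Checker<check::PreCall> {
+    mutable OwningPtr<BugType> BT;
+  public:
+    void checkPreCall(const CallEvent &Call, CheckerContext &C) const {
+      for (unsigned i = 0, e = Call.getNumArgs(); i != e; ++i) {
+        if (!Call.getArgSVal(i).isZeroConstant())
+          continue;
+        // A checking invariant is violated: end this path with a sink node
+        // and hand a report to the BugReporter.
+        ExplodedNode *N = C.generateSink();
+        if (!N)
+          return;
+        if (!BT)
+          BT.reset(new BugType("Null argument", "Example checks"));
+        C.emitReport(new BugReport(*BT, "Null passed as an argument", N));
+        return;
+      }
+    }
+  };
+  } // end anonymous namespace
+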
+= How It Works =
+
+The analyzer is inspired by several foundational research papers ([1],
+[2]).  (FIXME: kremenek to add more links)
+
+In a nutshell, the analyzer is basically a source code simulator that
+traces out possible paths of execution.  The state of the program
+(values of variables and expressions) is encapsulated by the state
+(ProgramState).  A location in the program is called a program point
+(ProgramPoint), and the combination of state and program point is a
+node in an exploded graph (ExplodedGraph).  The term "exploded" comes
+from exploding the control-flow edges in the control-flow graph (CFG).
+
+Conceptually the analyzer does a reachability analysis through the
+ExplodedGraph.  We start at a root node, which has the entry program
+point and initial state, and then simulate transitions by analyzing
+individual expressions.  The analysis of an expression can cause the
+state to change, resulting in a new node in the ExplodedGraph with an
+updated program point and an updated state.  A bug is found by hitting
+a node that satisfies some "bug condition" (basically a violation of a
+checking invariant).
+
+The analyzer traces out multiple paths by reasoning about branches and
+then bifurcating the state: on the true branch the conditions of the
+branch are assumed to be true and on the false branch the conditions
+of the branch are assumed to be false.  Such "assumptions" create
+constraints on the values of the program, and those constraints are
+recorded in the ProgramState object (and are manipulated by the
+ConstraintManager).  If assuming the conditions of a branch would
+cause the constraints to be unsatisfiable, the branch is considered
+infeasible and that path is not taken.  This is how we get
+path-sensitivity.  We reduce exponential blow-up by caching nodes.  If
+a new node with the same state and program point as an existing node
+would get generated, the path "caches out" and we simply reuse the
+existing node.  Thus the ExplodedGraph is not a DAG; it can contain
+cycles as paths loop back onto each other and cache out.
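+
+To make the bifurcation concrete, consider a small, hypothetical function
+(not taken from this tree) that the analyzer might simulate:
+
+  void bifurcate(int x) {
+    int *p = 0;
+    if (x > 0)
+      p = &x;    // reached only on paths where the constraint 'x > 0' holds
+    if (x > 0)
+      *p = 1;    // the 'p == 0' possibility is infeasible here and is pruned
+  }
+
+On the path where 'x > 0' was assumed false at the first branch, assuming it
+true at the second branch would make the constraints unsatisfiable, so that
+path is never explored and no null dereference is reported.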
+
+ProgramState and ExplodedNodes are essentially immutable once created; to
+change the program state, the engine creates a new ProgramState rather than
+modifying an existing one.  This immutability is key since the ExplodedGraph
+represents the behavior of the analyzed program from the entry point.  To
+represent these efficiently, we use functional data structures (e.g.,
+ImmutableMaps) which share data between instances.
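+
+As a rough illustration of that sharing (this is just llvm::ImmutableMap used
+directly, not analyzer code):
+
+  #include "llvm/ADT/ImmutableMap.h"
+
+  void share() {
+    llvm::ImmutableMap<int, int>::Factory F;
+    llvm::ImmutableMap<int, int> Old = F.getEmptyMap();
+    llvm::ImmutableMap<int, int> New = F.add(Old, 1, 10);
+    // 'Old' is left untouched; 'Old' and 'New' share tree nodes internally.
+  }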
+
+Finally, individual Checkers work by also manipulating the analysis
+state.  The analyzer engine talks to them via a visitor interface.
+For example, the PreVisitCallExpr() method is called by GRExprEngine
+to tell the Checker that we are about to analyze a CallExpr, and the
+checker is asked to check for any preconditions that might not be
+satisfied.  The checker can do nothing, or it can generate a new
+ProgramState and ExplodedNode which contains updated checker state.  If it
+finds a bug, it can tell the BugReporter object about the bug,
+providing it an ExplodedNode which is the last node in the path that
+triggered the problem.
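+
+Note that the text above describes the older CheckerVisitor-style callbacks;
+the checkers in this directory use the Checker<> template with callbacks such
+as checkPreStmt.  A rough, compile-only sketch (the class name is made up;
+see Checker.h for the full callback set and the *Checker.cpp files for real
+examples):
+
+  #include "clang/StaticAnalyzer/Core/Checker.h"
+  #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+  using namespace clang;
+  using namespace ento;
+
+  namespace {
+  class ExampleChecker : public Checker< check::PreStmt<CallExpr> > {
+  public:
+    // Called just before the engine simulates a CallExpr.  A real checker
+    // would inspect the call, record checker-specific state in a new
+    // ProgramState, or report a bug through the CheckerContext.
+    void checkPreStmt(const CallExpr *CE, CheckerContext &C) const {}
+  };
+  } // end anonymous namespace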
+
+= Notes about C++ =
+
+Since constructors are now visited in the CFG before the variable they
+construct, we create a temporary object as the destination region that is
+constructed into.  See ExprEngine::VisitCXXConstructExpr().
+
+In ExprEngine::processCallExit(), we always bind the object region to the
+evaluated CXXConstructExpr. Then in VisitDeclStmt(), we compute the
+corresponding lazy compound value if the variable is not a reference, and
+bind the variable region to the lazy compound value. If the variable
+is a reference, we simply use the object region as the initializer value.
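+
+For example, in a hypothetical snippet like the following, the
+CXXConstructExpr for S() appears in the CFG before the DeclStmt for 's', so
+the construction initially targets a temporary region that VisitDeclStmt()
+later binds to the variable:
+
+  struct S { S(); };
+  void f() {
+    S s;       // CXXConstructExpr is visited before the DeclStmt for 's'
+    S &r = s;  // for a reference, the object region is the initializer value
+  }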
+
+Before entering a C++ method (or ctor/dtor), the 'this' region is bound
+to the object region. In ctors, we synthesize 'this' region with  
+CXXRecordDecl*, which means we do not use type qualifiers. In methods, we
+synthesize 'this' region with CXXMethodDecl*, which has getThisType() 
+taking type qualifiers into account.  It does not matter that we use a
+qualified 'this' region in one method and an unqualified 'this' region in
+another, because we only need the 'this' region to be consistent between the
+point where we synthesize it and the point where we create it directly from
+a CXXThisExpr within a single method call.
+
+= Working on the Analyzer =
+
+If you are interested in bringing up support for C++ expressions, the
+best place to look is the visitation logic in GRExprEngine, which
+handles the simulation of individual expressions.  There are plenty of
+examples there of how other expressions are handled.
+
+If you are interested in writing checkers, look at the Checker and
+CheckerVisitor interfaces (Checker.h and CheckerVisitor.h).  Also look
+at the files named *Checker.cpp for examples on how you can implement
+these interfaces.
+
+= Debugging the Analyzer =
+
+There are some useful command-line options for debugging.  For example:
+
+$ clang -cc1 -help | grep analyze
+ -analyze-function <value>
+ -analyzer-display-progress
+ -analyzer-viz-egraph-graphviz
+ ...
+
+The first allows you to specify only analyzing a specific function.
+The second prints to the console what function is being analyzed.  The
+third generates a graphviz dot file of the ExplodedGraph.  This is
+extremely useful when debugging the analyzer and viewing the
+simulation results.
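+
+For example, an invocation along these lines (the input file name is just a
+placeholder) analyzes a single file with the core checkers and prints each
+function as it is analyzed:
+
+$ clang -cc1 -analyze -analyzer-checker=core -analyzer-display-progress test.c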
+
+Of course, viewing the CFG (Control-Flow Graph) is also useful:
+
+$ clang -cc1 -help | grep cfg
+ -cfg-add-implicit-dtors Add C++ implicit destructors to CFGs for all analyses
+ -cfg-add-initializers   Add C++ initializers to CFGs for all analyses
+ -cfg-dump               Display Control-Flow Graphs
+ -cfg-view               View Control-Flow Graphs using GraphViz
+ -unoptimized-cfg        Generate unoptimized CFGs for all analyses
+
+-cfg-dump dumps a textual representation of the CFG to the console,
+and -cfg-view creates a GraphViz representation.
+
+= References =
+
+[1] Precise interprocedural dataflow analysis via graph reachability,
+    T Reps, S Horwitz, and M Sagiv, POPL '95,
+    http://portal.acm.org/citation.cfm?id=199462
+
+[2] A memory model for static analysis of C programs, Z Xu, T
+    Kremenek, and J Zhang, http://lcs.ios.ac.cn/~xzx/memmodel.pdf