v1.6.0
diff --git a/.gitignore b/.gitignore
index 40a610d..f7fc044 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 *.pyc
+*.obj
 *.exe
 *.pdb
 *.ilk
diff --git a/HACKING.md b/HACKING.md
index 059a424..9c6830f 100644
--- a/HACKING.md
+++ b/HACKING.md
@@ -161,3 +161,49 @@
 by following [these instructions][win7sdk].
 
 [win7sdk]: http://www.kegel.com/wine/cl-howto-win7sdk.html
+
+### Using gcov
+
+Do a clean debug build with the right flags:
+
+    CFLAGS=-coverage LDFLAGS=-coverage ./configure.py --debug
+    ninja -t clean ninja_test && ninja ninja_test
+
+Run the test binary to generate `.gcda` and `.gcno` files in the build
+directory, then run gcov on the `.o` files to generate `.gcov` files in the
+root directory:
+
+    ./ninja_test
+    gcov build/*.o
+
+Look at the generated `.gcov` files directly, or use your favorite gcov viewer.
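+
+If you have `lcov` installed (not something ninja itself needs; just a
+convenient viewer), you can turn the coverage data into an HTML report. A
+rough sketch, run from the source root after the steps above:
+
+    lcov --capture --directory build --output-file coverage.info
+    genhtml coverage.info --output-directory coverage-html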
+
+### Using afl-fuzz
+
+Build with afl-clang++:
+
+    CXX=path/to/afl-1.20b/afl-clang++ ./configure.py
+    ninja
+
+Then run afl-fuzz like so:
+
+    afl-fuzz -i misc/afl-fuzz -o /tmp/afl-fuzz-out ./ninja -n -f @@
+
+You can pass `-x misc/afl-fuzz-tokens` to use the token dictionary. In my
+testing, that did not seem to be more effective, though.
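+
+With the dictionary, the invocation is the same as above plus `-x`:
+
+    afl-fuzz -i misc/afl-fuzz -o /tmp/afl-fuzz-out -x misc/afl-fuzz-tokens \
+        ./ninja -n -f @@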
+
+#### Using afl-fuzz with asan
+
+If you want to use asan (the `isysroot` bit is only needed on OS X; if clang
+can't find the C++ standard headers, make sure your LLVM checkout includes a
+libc++ checkout and has libc++ installed in the build directory):
+
+    CFLAGS="-fsanitize=address -isysroot $(xcrun -show-sdk-path)" \
+        LDFLAGS=-fsanitize=address CXX=path/to/afl-1.20b/afl-clang++ \
+        ./configure.py
+    AFL_CXX=path/to/clang++ ninja
+
+Make sure ninja can find the asan runtime:
+
+    DYLD_LIBRARY_PATH=path/to//lib/clang/3.7.0/lib/darwin/ \
+        afl-fuzz -i misc/afl-fuzz -o /tmp/afl-fuzz-out ./ninja -n -f @@
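+
+Crashing inputs found by afl-fuzz end up under the output directory (here
+`/tmp/afl-fuzz-out/crashes/`); you can replay one directly to debug it, using
+the same `DYLD_LIBRARY_PATH` if you built with asan:
+
+    ./ninja -n -f /tmp/afl-fuzz-out/crashes/<some-crash-file>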
diff --git a/RELEASING b/RELEASING
index fd4affb..4d08253 100644
--- a/RELEASING
+++ b/RELEASING
@@ -23,6 +23,9 @@
 1. copy old mail
 
 Update website:
-(note to self: website is now in github.com/martine/martine.github.io)
-1. rebuild manual, put in place on website
-2. update home page mention of latest version.
+1. Make sure your ninja checkout is on the v1.5.0 tag
+2. Clone https://github.com/martine/martine.github.io
+3. In that repo, `cd ninja && ./update-docs.sh`
+4. Update index.html with newest version and link to release notes
+5. git commit -m 'run update-docs.sh, 1.5.0 release'
+6. git push origin master
diff --git a/configure.py b/configure.py
index aac4ad4..2eacbfe 100755
--- a/configure.py
+++ b/configure.py
@@ -106,8 +106,9 @@
     It also proxies all calls to an underlying ninja_syntax.Writer, to
     behave like non-bootstrap mode.
     """
-    def __init__(self, writer):
+    def __init__(self, writer, verbose=False):
         self.writer = writer
+        self.verbose = verbose
         # Map of variable name => expanded variable value.
         self.vars = {}
         # Map of rule name => dict of rule attributes.
@@ -163,8 +164,10 @@
     def _run_command(self, cmdline):
         """Run a subcommand, quietly.  Prints the full command on error."""
         try:
+            if self.verbose:
+                print(cmdline)
             subprocess.check_call(cmdline, shell=True)
-        except subprocess.CalledProcessError, e:
+        except subprocess.CalledProcessError:
             print('when running: ', cmdline)
             raise
 
@@ -173,6 +176,8 @@
 profilers = ['gmon', 'pprof']
 parser.add_option('--bootstrap', action='store_true',
                   help='bootstrap a ninja binary from nothing')
+parser.add_option('--verbose', action='store_true',
+                  help='enable verbose build')
 parser.add_option('--platform',
                   help='target platform (' +
                        '/'.join(Platform.known_platforms()) + ')',
@@ -217,7 +222,7 @@
     # Wrap ninja_writer with the Bootstrapper, which also executes the
     # commands.
     print('bootstrapping ninja...')
-    n = Bootstrap(n)
+    n = Bootstrap(n, verbose=options.verbose)
 
 n.comment('This file is used to build ninja itself.')
 n.comment('It is generated by ' + os.path.basename(__file__) + '.')
@@ -279,12 +284,13 @@
               '/wd4512', '/wd4800', '/wd4702', '/wd4819',
               # Disable warnings about passing "this" during initialization.
               '/wd4355',
+              # Disable warnings about ignored typedef in DbgHelp.h
+              '/wd4091',
               '/GR-',  # Disable RTTI.
               # Disable size_t -> int truncation warning.
               # We never have strings or arrays larger than 2**31.
               '/wd4267',
               '/DNOMINMAX', '/D_CRT_SECURE_NO_WARNINGS',
-              '/D_VARIADIC_MAX=10',
               '/DNINJA_PYTHON="%s"' % options.with_python]
     if options.bootstrap:
         # In bootstrap mode, we have no ninja process to catch /showIncludes
@@ -310,8 +316,14 @@
         cflags.remove('-fno-rtti')  # Needed for above pedanticness.
     else:
         cflags += ['-O2', '-DNDEBUG']
-    if 'clang' in os.path.basename(CXX):
-        cflags += ['-fcolor-diagnostics']
+    try:
+        proc = subprocess.Popen(
+            [CXX, '-fdiagnostics-color', '-c', '-x', 'c++', '/dev/null'],
+            stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT)
+        if proc.wait() == 0:
+            cflags += ['-fdiagnostics-color']
+    except:
+        pass
     if platform.is_mingw():
         cflags += ['-D_WIN32_WINNT=0x0501']
     ldflags = ['-L$builddir']
@@ -602,6 +614,10 @@
 n.close()
 print('wrote %s.' % BUILD_FILENAME)
 
+verbose = ''
+if options.verbose:
+    verbose = ' -v'
+
 if options.bootstrap:
     print('bootstrap complete.  rebuilding...')
     if platform.is_windows():
@@ -609,6 +625,6 @@
         if os.path.exists(bootstrap_exe):
             os.unlink(bootstrap_exe)
         os.rename('ninja.exe', bootstrap_exe)
-        subprocess.check_call('ninja.bootstrap.exe', shell=True)
+        subprocess.check_call('ninja.bootstrap.exe%s' % verbose, shell=True)
     else:
-        subprocess.check_call('./ninja', shell=True)
+        subprocess.check_call('./ninja%s' % verbose, shell=True)
diff --git a/doc/manual.asciidoc b/doc/manual.asciidoc
index 21f4e42..dc4ffad 100644
--- a/doc/manual.asciidoc
+++ b/doc/manual.asciidoc
@@ -1,7 +1,7 @@
 Ninja
 =====
 Evan Martin <martine@danga.com>
-v1.5.3, Nov 2014
+v1.6.0, Jun 2015
 
 
 Introduction
@@ -181,6 +181,12 @@
 the current directory and builds all out-of-date targets.  You can
 specify which targets (files) to build as command line arguments.
 
+There is also a special syntax `target^` for specifying a target as the
+first output of the rule that has the given source file as an input, if
+such a rule exists. For example, specifying `foo.c^` builds `foo.o`
+(assuming those targets exist in your build files).
+
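+On the command line, that looks like this (assuming `foo.o` is built from
+`foo.c` somewhere in your manifest):
+
+----
+ninja foo.c^
+----
+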
 `ninja -h` prints help output.  Many of Ninja's flags intentionally
 match those of Make; e.g `ninja -C build -j 20` changes into the
 `build` directory and runs 20 build commands in parallel.  (Note that
@@ -915,9 +921,12 @@
 
 Top-level variable declarations are scoped to the file they occur in.
 
+Rule declarations are also scoped to the file they occur in.
+_(Available since Ninja 1.6)_
+
 The `subninja` keyword, used to include another `.ninja` file,
 introduces a new scope.  The included `subninja` file may use the
-variables from the parent file, and shadow their values for the file's
+variables and rules from the parent file, and shadow their values for the file's
 scope, but it won't affect values of the variables in the parent.
 
 To include another `.ninja` file in the current scope, much like a C
diff --git a/misc/afl-fuzz-tokens/kw_build b/misc/afl-fuzz-tokens/kw_build
new file mode 100644
index 0000000..c795b05
--- /dev/null
+++ b/misc/afl-fuzz-tokens/kw_build
@@ -0,0 +1 @@
+build
\ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/kw_default b/misc/afl-fuzz-tokens/kw_default
new file mode 100644
index 0000000..331d858
--- /dev/null
+++ b/misc/afl-fuzz-tokens/kw_default
@@ -0,0 +1 @@
+default
\ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/kw_include b/misc/afl-fuzz-tokens/kw_include
new file mode 100644
index 0000000..2996fba
--- /dev/null
+++ b/misc/afl-fuzz-tokens/kw_include
@@ -0,0 +1 @@
+include
\ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/kw_pool b/misc/afl-fuzz-tokens/kw_pool
new file mode 100644
index 0000000..e783591
--- /dev/null
+++ b/misc/afl-fuzz-tokens/kw_pool
@@ -0,0 +1 @@
+pool
\ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/kw_rule b/misc/afl-fuzz-tokens/kw_rule
new file mode 100644
index 0000000..841e840
--- /dev/null
+++ b/misc/afl-fuzz-tokens/kw_rule
@@ -0,0 +1 @@
+rule
\ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/kw_subninja b/misc/afl-fuzz-tokens/kw_subninja
new file mode 100644
index 0000000..c4fe0c7
--- /dev/null
+++ b/misc/afl-fuzz-tokens/kw_subninja
@@ -0,0 +1 @@
+subninja
\ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_a b/misc/afl-fuzz-tokens/misc_a
new file mode 100644
index 0000000..2e65efe
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_a
@@ -0,0 +1 @@
+a
\ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_b b/misc/afl-fuzz-tokens/misc_b
new file mode 100644
index 0000000..63d8dbd
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_b
@@ -0,0 +1 @@
+b
\ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_colon b/misc/afl-fuzz-tokens/misc_colon
new file mode 100644
index 0000000..22ded55
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_colon
@@ -0,0 +1 @@
+:
\ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_cont b/misc/afl-fuzz-tokens/misc_cont
new file mode 100644
index 0000000..857f13a
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_cont
@@ -0,0 +1 @@
+$
diff --git a/misc/afl-fuzz-tokens/misc_dollar b/misc/afl-fuzz-tokens/misc_dollar
new file mode 100644
index 0000000..6f4f765
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_dollar
@@ -0,0 +1 @@
+$
\ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_eq b/misc/afl-fuzz-tokens/misc_eq
new file mode 100644
index 0000000..851c75c
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_eq
@@ -0,0 +1 @@
+=
\ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_indent b/misc/afl-fuzz-tokens/misc_indent
new file mode 100644
index 0000000..136d063
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_indent
@@ -0,0 +1 @@
+  
\ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_pipe b/misc/afl-fuzz-tokens/misc_pipe
new file mode 100644
index 0000000..a3871d4
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_pipe
@@ -0,0 +1 @@
+|
\ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_pipepipe b/misc/afl-fuzz-tokens/misc_pipepipe
new file mode 100644
index 0000000..27cc728
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_pipepipe
@@ -0,0 +1 @@
+||
\ No newline at end of file
diff --git a/misc/afl-fuzz-tokens/misc_space b/misc/afl-fuzz-tokens/misc_space
new file mode 100644
index 0000000..0519ecb
--- /dev/null
+++ b/misc/afl-fuzz-tokens/misc_space
@@ -0,0 +1 @@
+ 
\ No newline at end of file
diff --git a/misc/afl-fuzz/build.ninja b/misc/afl-fuzz/build.ninja
new file mode 100644
index 0000000..52cd2f1
--- /dev/null
+++ b/misc/afl-fuzz/build.ninja
@@ -0,0 +1,5 @@
+rule b
+  command = clang -MMD -MF $out.d -o $out -c $in
+  description = building $out
+
+build a.o: b a.c
diff --git a/misc/ninja-mode.el b/misc/ninja-mode.el
index 71825d5..639e537 100644
--- a/misc/ninja-mode.el
+++ b/misc/ninja-mode.el
@@ -47,28 +47,27 @@
 
 (defun ninja-syntax-propertize (start end)
   (save-match-data
-    (save-excursion
-      (goto-char start)
-      (while (search-forward "#" end t)
-        (let ((match-pos (match-beginning 0)))
-          (when (and
-                 ;; Is it the first non-white character on the line?
-                 (eq match-pos (save-excursion (back-to-indentation) (point)))
-                 (save-excursion
-                   (goto-char (line-end-position 0))
-                   (or
-                    ;; If we're continuting the previous line, it's not a
-                    ;; comment.
-                    (not (eq ?$ (char-before)))
-                    ;; Except if the previous line is a comment as well, as the
-                    ;; continuation dollar is ignored then.
-                    (nth 4 (syntax-ppss)))))
-            (put-text-property match-pos (1+ match-pos) 'syntax-table '(11))
-            (let ((line-end (line-end-position)))
-              ;; Avoid putting properties past the end of the buffer.
-              ;; Otherwise we get an `args-out-of-range' error.
-              (unless (= line-end (1+ (buffer-size)))
-                (put-text-property line-end (1+ line-end) 'syntax-table '(12))))))))))
+    (goto-char start)
+    (while (search-forward "#" end t)
+      (let ((match-pos (match-beginning 0)))
+        (when (and
+               ;; Is it the first non-white character on the line?
+               (eq match-pos (save-excursion (back-to-indentation) (point)))
+               (save-excursion
+                 (goto-char (line-end-position 0))
+                 (or
+                  ;; If we're continuing the previous line, it's not a
+                  ;; comment.
+                  (not (eq ?$ (char-before)))
+                  ;; Except if the previous line is a comment as well, as the
+                  ;; continuation dollar is ignored then.
+                  (nth 4 (syntax-ppss)))))
+          (put-text-property match-pos (1+ match-pos) 'syntax-table '(11))
+          (let ((line-end (line-end-position)))
+            ;; Avoid putting properties past the end of the buffer.
+            ;; Otherwise we get an `args-out-of-range' error.
+            (unless (= line-end (1+ (buffer-size)))
+              (put-text-property line-end (1+ line-end) 'syntax-table '(12)))))))))
 
 ;;;###autoload
 (define-derived-mode ninja-mode prog-mode "ninja"
diff --git a/misc/zsh-completion b/misc/zsh-completion
index af24f89..fd9b3a7 100644
--- a/misc/zsh-completion
+++ b/misc/zsh-completion
@@ -32,7 +32,7 @@
 }
 
 __get_modes() {
-  ninja -d list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2 | head -n -1
+  ninja -d list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2 | sed '$d'
 }
 
 __modes() {
diff --git a/src/build.cc b/src/build.cc
index 3e74131..cdb8e0a 100644
--- a/src/build.cc
+++ b/src/build.cc
@@ -278,7 +278,7 @@
     return false;
   }
 
-  if (CheckDependencyCycle(node, stack, err))
+  if (CheckDependencyCycle(node, *stack, err))
     return false;
 
   if (edge->outputs_ready())
@@ -316,20 +316,32 @@
   return true;
 }
 
-bool Plan::CheckDependencyCycle(Node* node, vector<Node*>* stack, string* err) {
-  vector<Node*>::reverse_iterator ri =
-      find(stack->rbegin(), stack->rend(), node);
-  if (ri == stack->rend())
+bool Plan::CheckDependencyCycle(Node* node, const vector<Node*>& stack,
+                                string* err) {
+  vector<Node*>::const_iterator start = stack.begin();
+  while (start != stack.end() && (*start)->in_edge() != node->in_edge())
+    ++start;
+  if (start == stack.end())
     return false;
 
-  // Add this node onto the stack to make it clearer where the loop
-  // is.
-  stack->push_back(node);
+  // Build error string for the cycle.
+  vector<Node*> cycle(start, stack.end());
+  cycle.push_back(node);
 
-  vector<Node*>::iterator start = find(stack->begin(), stack->end(), node);
+  if (cycle.front() != cycle.back()) {
+    // Consider
+    //   build a b: cat c
+    //   build c: cat a
+    // stack will contain [b, c], node will be a.  To not print b -> c -> a,
+    // shift by one to get c -> a -> c which makes the cycle clear.
+    cycle.erase(cycle.begin());
+    cycle.push_back(cycle.front());
+    assert(cycle.front() == cycle.back());
+  }
+
   *err = "dependency cycle: ";
-  for (vector<Node*>::iterator i = start; i != stack->end(); ++i) {
-    if (i != start)
+  for (vector<Node*>::const_iterator i = cycle.begin(); i != cycle.end(); ++i) {
+    if (i != cycle.begin())
       err->append(" -> ");
     err->append((*i)->path());
   }
@@ -339,9 +351,9 @@
 Edge* Plan::FindWork() {
   if (ready_.empty())
     return NULL;
-  set<Edge*>::iterator i = ready_.begin();
-  Edge* edge = *i;
-  ready_.erase(i);
+  set<Edge*>::iterator e = ready_.begin();
+  Edge* edge = *e;
+  ready_.erase(e);
   return edge;
 }
 
@@ -362,101 +374,106 @@
   }
 }
 
-void Plan::ResumeDelayedJobs(Edge* edge) {
-  edge->pool()->EdgeFinished(*edge);
-  edge->pool()->RetrieveReadyEdges(&ready_);
-}
-
 void Plan::EdgeFinished(Edge* edge) {
-  map<Edge*, bool>::iterator i = want_.find(edge);
-  assert(i != want_.end());
-  if (i->second)
+  map<Edge*, bool>::iterator e = want_.find(edge);
+  assert(e != want_.end());
+  bool directly_wanted = e->second;
+  if (directly_wanted)
     --wanted_edges_;
-  want_.erase(i);
+  want_.erase(e);
   edge->outputs_ready_ = true;
 
-  // See if this job frees up any delayed jobs
-  ResumeDelayedJobs(edge);
+  // See if this job frees up any delayed jobs.
+  if (directly_wanted)
+    edge->pool()->EdgeFinished(*edge);
+  edge->pool()->RetrieveReadyEdges(&ready_);
 
   // Check off any nodes we were waiting for with this edge.
-  for (vector<Node*>::iterator i = edge->outputs_.begin();
-       i != edge->outputs_.end(); ++i) {
-    NodeFinished(*i);
+  for (vector<Node*>::iterator o = edge->outputs_.begin();
+       o != edge->outputs_.end(); ++o) {
+    NodeFinished(*o);
   }
 }
 
 void Plan::NodeFinished(Node* node) {
   // See if we want any edges from this node.
-  for (vector<Edge*>::const_iterator i = node->out_edges().begin();
-       i != node->out_edges().end(); ++i) {
-    map<Edge*, bool>::iterator want_i = want_.find(*i);
-    if (want_i == want_.end())
+  for (vector<Edge*>::const_iterator oe = node->out_edges().begin();
+       oe != node->out_edges().end(); ++oe) {
+    map<Edge*, bool>::iterator want_e = want_.find(*oe);
+    if (want_e == want_.end())
       continue;
 
     // See if the edge is now ready.
-    if ((*i)->AllInputsReady()) {
-      if (want_i->second) {
-        ScheduleWork(*i);
+    if ((*oe)->AllInputsReady()) {
+      if (want_e->second) {
+        ScheduleWork(*oe);
       } else {
         // We do not need to build this edge, but we might need to build one of
         // its dependents.
-        EdgeFinished(*i);
+        EdgeFinished(*oe);
       }
     }
   }
 }
 
-void Plan::CleanNode(DependencyScan* scan, Node* node) {
+bool Plan::CleanNode(DependencyScan* scan, Node* node, string* err) {
   node->set_dirty(false);
 
-  for (vector<Edge*>::const_iterator ei = node->out_edges().begin();
-       ei != node->out_edges().end(); ++ei) {
+  for (vector<Edge*>::const_iterator oe = node->out_edges().begin();
+       oe != node->out_edges().end(); ++oe) {
     // Don't process edges that we don't actually want.
-    map<Edge*, bool>::iterator want_i = want_.find(*ei);
-    if (want_i == want_.end() || !want_i->second)
+    map<Edge*, bool>::iterator want_e = want_.find(*oe);
+    if (want_e == want_.end() || !want_e->second)
       continue;
 
     // Don't attempt to clean an edge if it failed to load deps.
-    if ((*ei)->deps_missing_)
+    if ((*oe)->deps_missing_)
       continue;
 
     // If all non-order-only inputs for this edge are now clean,
     // we might have changed the dirty state of the outputs.
     vector<Node*>::iterator
-        begin = (*ei)->inputs_.begin(),
-        end = (*ei)->inputs_.end() - (*ei)->order_only_deps_;
+        begin = (*oe)->inputs_.begin(),
+        end = (*oe)->inputs_.end() - (*oe)->order_only_deps_;
     if (find_if(begin, end, mem_fun(&Node::dirty)) == end) {
       // Recompute most_recent_input.
       Node* most_recent_input = NULL;
-      for (vector<Node*>::iterator ni = begin; ni != end; ++ni) {
-        if (!most_recent_input || (*ni)->mtime() > most_recent_input->mtime())
-          most_recent_input = *ni;
+      for (vector<Node*>::iterator i = begin; i != end; ++i) {
+        if (!most_recent_input || (*i)->mtime() > most_recent_input->mtime())
+          most_recent_input = *i;
       }
 
       // Now, this edge is dirty if any of the outputs are dirty.
       // If the edge isn't dirty, clean the outputs and mark the edge as not
       // wanted.
-      if (!scan->RecomputeOutputsDirty(*ei, most_recent_input)) {
-        for (vector<Node*>::iterator ni = (*ei)->outputs_.begin();
-             ni != (*ei)->outputs_.end(); ++ni) {
-          CleanNode(scan, *ni);
+      bool outputs_dirty = false;
+      if (!scan->RecomputeOutputsDirty(*oe, most_recent_input,
+                                       &outputs_dirty, err)) {
+        return false;
+      }
+      if (!outputs_dirty) {
+        for (vector<Node*>::iterator o = (*oe)->outputs_.begin();
+             o != (*oe)->outputs_.end(); ++o) {
+          if (!CleanNode(scan, *o, err))
+            return false;
         }
 
-        want_i->second = false;
+        want_e->second = false;
         --wanted_edges_;
-        if (!(*ei)->is_phony())
+        if (!(*oe)->is_phony())
           --command_edges_;
       }
     }
   }
+  return true;
 }
 
 void Plan::Dump() {
   printf("pending: %d\n", (int)want_.size());
-  for (map<Edge*, bool>::iterator i = want_.begin(); i != want_.end(); ++i) {
-    if (i->second)
+  for (map<Edge*, bool>::iterator e = want_.begin(); e != want_.end(); ++e) {
+    if (e->second)
       printf("want ");
-    i->first->Dump();
+    e->first->Dump();
   }
   printf("ready: %d\n", (int)ready_.size());
 }
@@ -477,9 +494,9 @@
 
 vector<Edge*> RealCommandRunner::GetActiveEdges() {
   vector<Edge*> edges;
-  for (map<Subprocess*, Edge*>::iterator i = subproc_to_edge_.begin();
-       i != subproc_to_edge_.end(); ++i)
-    edges.push_back(i->second);
+  for (map<Subprocess*, Edge*>::iterator e = subproc_to_edge_.begin();
+       e != subproc_to_edge_.end(); ++e)
+    edges.push_back(e->second);
   return edges;
 }
 
@@ -516,9 +533,9 @@
   result->status = subproc->Finish();
   result->output = subproc->GetOutput();
 
-  map<Subprocess*, Edge*>::iterator i = subproc_to_edge_.find(subproc);
-  result->edge = i->second;
-  subproc_to_edge_.erase(i);
+  map<Subprocess*, Edge*>::iterator e = subproc_to_edge_.find(subproc);
+  result->edge = e->second;
+  subproc_to_edge_.erase(e);
 
   delete subproc;
   return true;
@@ -541,11 +558,11 @@
     vector<Edge*> active_edges = command_runner_->GetActiveEdges();
     command_runner_->Abort();
 
-    for (vector<Edge*>::iterator i = active_edges.begin();
-         i != active_edges.end(); ++i) {
-      string depfile = (*i)->GetUnescapedDepfile();
-      for (vector<Node*>::iterator ni = (*i)->outputs_.begin();
-           ni != (*i)->outputs_.end(); ++ni) {
+    for (vector<Edge*>::iterator e = active_edges.begin();
+         e != active_edges.end(); ++e) {
+      string depfile = (*e)->GetUnescapedDepfile();
+      for (vector<Node*>::iterator o = (*e)->outputs_.begin();
+           o != (*e)->outputs_.end(); ++o) {
         // Only delete this output if it was actually modified.  This is
         // important for things like the generator where we don't want to
         // delete the manifest file if we can avoid it.  But if the rule
@@ -553,10 +570,12 @@
         // need to rebuild an output because of a modified header file
         // mentioned in a depfile, and the command touches its depfile
         // but is interrupted before it touches its output file.)
-        if (!depfile.empty() ||
-            (*ni)->mtime() != disk_interface_->Stat((*ni)->path())) {
-          disk_interface_->RemoveFile((*ni)->path());
-        }
+        string err;
+        TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), &err);
+        if (new_mtime == -1)  // Log and ignore Stat() errors.
+          Error("%s", err.c_str());
+        if (!depfile.empty() || (*o)->mtime() != new_mtime)
+          disk_interface_->RemoveFile((*o)->path());
       }
       if (!depfile.empty())
         disk_interface_->RemoveFile(depfile);
@@ -576,7 +595,6 @@
 }
 
 bool Builder::AddTarget(Node* node, string* err) {
-  node->StatIfNecessary(disk_interface_);
   if (Edge* in_edge = node->in_edge()) {
     if (!scan_.RecomputeDirty(in_edge, err))
       return false;
@@ -690,9 +708,9 @@
 
   // Create directories necessary for outputs.
   // XXX: this will block; do we care?
-  for (vector<Node*>::iterator i = edge->outputs_.begin();
-       i != edge->outputs_.end(); ++i) {
-    if (!disk_interface_->MakeDirs((*i)->path()))
+  for (vector<Node*>::iterator o = edge->outputs_.begin();
+       o != edge->outputs_.end(); ++o) {
+    if (!disk_interface_->MakeDirs((*o)->path()))
       return false;
   }
 
@@ -752,14 +770,17 @@
   if (edge->GetBindingBool("restat") && !config_.dry_run) {
     bool node_cleaned = false;
 
-    for (vector<Node*>::iterator i = edge->outputs_.begin();
-         i != edge->outputs_.end(); ++i) {
-      TimeStamp new_mtime = disk_interface_->Stat((*i)->path());
-      if ((*i)->mtime() == new_mtime) {
+    for (vector<Node*>::iterator o = edge->outputs_.begin();
+         o != edge->outputs_.end(); ++o) {
+      TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), err);
+      if (new_mtime == -1)
+        return false;
+      if ((*o)->mtime() == new_mtime) {
         // The rule command did not change the output.  Propagate the clean
         // state through the build graph.
         // Note that this also applies to nonexistent outputs (mtime == 0).
-        plan_.CleanNode(&scan_, *i);
+        if (!plan_.CleanNode(&scan_, *o, err))
+          return false;
         node_cleaned = true;
       }
     }
@@ -769,14 +790,18 @@
       // (existing) non-order-only input or the depfile.
       for (vector<Node*>::iterator i = edge->inputs_.begin();
            i != edge->inputs_.end() - edge->order_only_deps_; ++i) {
-        TimeStamp input_mtime = disk_interface_->Stat((*i)->path());
+        TimeStamp input_mtime = disk_interface_->Stat((*i)->path(), err);
+        if (input_mtime == -1)
+          return false;
         if (input_mtime > restat_mtime)
           restat_mtime = input_mtime;
       }
 
       string depfile = edge->GetUnescapedDepfile();
       if (restat_mtime != 0 && deps_type.empty() && !depfile.empty()) {
-        TimeStamp depfile_mtime = disk_interface_->Stat(depfile);
+        TimeStamp depfile_mtime = disk_interface_->Stat(depfile, err);
+        if (depfile_mtime == -1)
+          return false;
         if (depfile_mtime > restat_mtime)
           restat_mtime = depfile_mtime;
       }
@@ -805,7 +830,9 @@
   if (!deps_type.empty() && !config_.dry_run) {
     assert(edge->outputs_.size() == 1 && "should have been rejected by parser");
     Node* out = edge->outputs_[0];
-    TimeStamp deps_mtime = disk_interface_->Stat(out->path());
+    TimeStamp deps_mtime = disk_interface_->Stat(out->path(), err);
+    if (deps_mtime == -1)
+      return false;
     if (!scan_.deps_log()->RecordDeps(out, deps_mtime, deps_nodes)) {
       *err = string("Error writing to deps log: ") + strerror(errno);
       return false;
diff --git a/src/build.h b/src/build.h
index eb3636a..8106faa 100644
--- a/src/build.h
+++ b/src/build.h
@@ -61,14 +61,16 @@
   void EdgeFinished(Edge* edge);
 
   /// Clean the given node during the build.
-  void CleanNode(DependencyScan* scan, Node* node);
+  /// Return false on error.
+  bool CleanNode(DependencyScan* scan, Node* node, string* err);
 
   /// Number of edges with commands to run.
   int command_edge_count() const { return command_edges_; }
 
 private:
   bool AddSubTarget(Node* node, vector<Node*>* stack, string* err);
-  bool CheckDependencyCycle(Node* node, vector<Node*>* stack, string* err);
+  bool CheckDependencyCycle(Node* node, const vector<Node*>& stack,
+                            string* err);
   void NodeFinished(Node* node);
 
   /// Submits a ready edge as a candidate for execution.
@@ -76,11 +78,6 @@
   /// currently-full pool.
   void ScheduleWork(Edge* edge);
 
-  /// Allows jobs blocking on |edge| to potentially resume.
-  /// For example, if |edge| is a member of a pool, calling this may schedule
-  /// previously pending jobs in that pool.
-  void ResumeDelayedJobs(Edge* edge);
-
   /// Keep track of which edges we want to build in this plan.  If this map does
   /// not contain an entry for an edge, we do not want to build the entry or its
   /// dependents.  If an entry maps to false, we do not want to build it, but we
diff --git a/src/build_log.cc b/src/build_log.cc
index b6f9874..281b851 100644
--- a/src/build_log.cc
+++ b/src/build_log.cc
@@ -102,7 +102,7 @@
 {}
 
 BuildLog::BuildLog()
-  : log_file_(NULL), needs_recompaction_(false), quiet_(false) {}
+  : log_file_(NULL), needs_recompaction_(false) {}
 
 BuildLog::~BuildLog() {
   Close();
@@ -354,8 +354,6 @@
 bool BuildLog::Recompact(const string& path, const BuildLogUser& user,
                          string* err) {
   METRIC_RECORD(".ninja_log recompact");
-  if (!quiet_)
-    printf("Recompacting log...\n");
 
   Close();
   string temp_path = path + ".recompact";
diff --git a/src/build_log.h b/src/build_log.h
index db0cfe0..fe81a85 100644
--- a/src/build_log.h
+++ b/src/build_log.h
@@ -80,7 +80,6 @@
 
   /// Rewrite the known log entries, throwing away old data.
   bool Recompact(const string& path, const BuildLogUser& user, string* err);
-  void set_quiet(bool quiet) { quiet_ = quiet; }
 
   typedef ExternalStringHashMap<LogEntry*>::Type Entries;
   const Entries& entries() const { return entries_; }
@@ -89,7 +88,6 @@
   Entries entries_;
   FILE* log_file_;
   bool needs_recompaction_;
-  bool quiet_;
 };
 
 #endif // NINJA_BUILD_LOG_H_
diff --git a/src/build_log_test.cc b/src/build_log_test.cc
index 7ea2117..2c41ba6 100644
--- a/src/build_log_test.cc
+++ b/src/build_log_test.cc
@@ -290,7 +290,6 @@
   ASSERT_TRUE(log2.LookupByOutput("out"));
   ASSERT_TRUE(log2.LookupByOutput("out2"));
   // ...and force a recompaction.
-  log2.set_quiet(true);
   EXPECT_TRUE(log2.OpenForWrite(kTestFilename, *this, &err));
   log2.Close();
 
diff --git a/src/build_test.cc b/src/build_test.cc
index bd1cd30..52a17c9 100644
--- a/src/build_test.cc
+++ b/src/build_test.cc
@@ -51,9 +51,9 @@
 };
 
 TEST_F(PlanTest, Basic) {
-  AssertParse(&state_,
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
 "build out: cat mid\n"
-"build mid: cat in\n");
+"build mid: cat in\n"));
   GetNode("mid")->MarkDirty();
   GetNode("out")->MarkDirty();
   string err;
@@ -84,9 +84,9 @@
 
 // Test that two outputs from one rule can be handled as inputs to the next.
 TEST_F(PlanTest, DoubleOutputDirect) {
-  AssertParse(&state_,
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
 "build out: cat mid1 mid2\n"
-"build mid1 mid2: cat in\n");
+"build mid1 mid2: cat in\n"));
   GetNode("mid1")->MarkDirty();
   GetNode("mid2")->MarkDirty();
   GetNode("out")->MarkDirty();
@@ -111,11 +111,11 @@
 
 // Test that two outputs from one rule can eventually be routed to another.
 TEST_F(PlanTest, DoubleOutputIndirect) {
-  AssertParse(&state_,
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
 "build out: cat b1 b2\n"
 "build b1: cat a1\n"
 "build b2: cat a2\n"
-"build a1 a2: cat in\n");
+"build a1 a2: cat in\n"));
   GetNode("a1")->MarkDirty();
   GetNode("a2")->MarkDirty();
   GetNode("b1")->MarkDirty();
@@ -149,11 +149,11 @@
 
 // Test that two edges from one output can both execute.
 TEST_F(PlanTest, DoubleDependent) {
-  AssertParse(&state_,
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
 "build out: cat a1 a2\n"
 "build a1: cat mid\n"
 "build a2: cat mid\n"
-"build mid: cat in\n");
+"build mid: cat in\n"));
   GetNode("mid")->MarkDirty();
   GetNode("a1")->MarkDirty();
   GetNode("a2")->MarkDirty();
@@ -186,11 +186,11 @@
 }
 
 TEST_F(PlanTest, DependencyCycle) {
-  AssertParse(&state_,
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
 "build out: cat mid\n"
 "build mid: cat in\n"
 "build in: cat pre\n"
-"build pre: cat out\n");
+"build pre: cat out\n"));
   GetNode("out")->MarkDirty();
   GetNode("mid")->MarkDirty();
   GetNode("in")->MarkDirty();
@@ -201,6 +201,43 @@
   ASSERT_EQ("dependency cycle: out -> mid -> in -> pre -> out", err);
 }
 
+TEST_F(PlanTest, CycleInEdgesButNotInNodes1) {
+  string err;
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build a b: cat a\n"));
+  EXPECT_FALSE(plan_.AddTarget(GetNode("b"), &err));
+  ASSERT_EQ("dependency cycle: a -> a", err);
+}
+
+TEST_F(PlanTest, CycleInEdgesButNotInNodes2) {
+  string err;
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build b a: cat a\n"));
+  EXPECT_FALSE(plan_.AddTarget(GetNode("b"), &err));
+  ASSERT_EQ("dependency cycle: a -> a", err);
+}
+
+TEST_F(PlanTest, CycleInEdgesButNotInNodes3) {
+  string err;
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build a b: cat c\n"
+"build c: cat a\n"));
+  EXPECT_FALSE(plan_.AddTarget(GetNode("b"), &err));
+  ASSERT_EQ("dependency cycle: c -> a -> c", err);
+}
+
+TEST_F(PlanTest, CycleInEdgesButNotInNodes4) {
+  string err;
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"build d: cat c\n"
+"build c: cat b\n"
+"build b: cat a\n"
+"build a e: cat d\n"
+"build f: cat e\n"));
+  EXPECT_FALSE(plan_.AddTarget(GetNode("f"), &err));
+  ASSERT_EQ("dependency cycle: d -> c -> b -> a -> d", err);
+}
+
 void PlanTest::TestPoolWithDepthOne(const char* test_case) {
   ASSERT_NO_FATAL_FAILURE(AssertParse(&state_, test_case));
   GetNode("out1")->MarkDirty();
@@ -458,8 +495,8 @@
   /// State of command_runner_ and logs contents (if specified) ARE MODIFIED.
   /// Handy to check for NOOP builds, and higher-level rebuild tests.
   void RebuildTarget(const string& target, const char* manifest,
-                     const char* log_path = NULL,
-                     const char* deps_path = NULL);
+                     const char* log_path = NULL, const char* deps_path = NULL,
+                     State* state = NULL);
 
   // Mark a path dirty.
   void Dirty(const string& path);
@@ -479,10 +516,13 @@
 };
 
 void BuildTest::RebuildTarget(const string& target, const char* manifest,
-                              const char* log_path, const char* deps_path) {
-  State state;
-  ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
-  AssertParse(&state, manifest);
+                              const char* log_path, const char* deps_path,
+                              State* state) {
+  State local_state, *pstate = &local_state;
+  if (state)
+    pstate = state;
+  ASSERT_NO_FATAL_FAILURE(AddCatRule(pstate));
+  AssertParse(pstate, manifest);
 
   string err;
   BuildLog build_log, *pbuild_log = NULL;
@@ -495,13 +535,13 @@
 
   DepsLog deps_log, *pdeps_log = NULL;
   if (deps_path) {
-    ASSERT_TRUE(deps_log.Load(deps_path, &state, &err));
+    ASSERT_TRUE(deps_log.Load(deps_path, pstate, &err));
     ASSERT_TRUE(deps_log.OpenForWrite(deps_path, &err));
     ASSERT_EQ("", err);
     pdeps_log = &deps_log;
   }
 
-  Builder builder(&state, config_, pbuild_log, pdeps_log, &fs_);
+  Builder builder(pstate, config_, pbuild_log, pdeps_log, &fs_);
   EXPECT_TRUE(builder.AddTarget(target, &err));
 
   command_runner_.commands_ran_.clear();
@@ -816,8 +856,7 @@
   fs_.Create("foo.c", "");
   fs_.Create("foo.o.d", "randomtext\n");
   EXPECT_FALSE(builder_.AddTarget("foo.o", &err));
-  EXPECT_EQ("expected depfile 'foo.o.d' to mention 'foo.o', got 'randomtext'",
-            err);
+  EXPECT_EQ("foo.o.d: expected ':' in depfile", err);
 }
 
 TEST_F(BuildTest, OrderOnlyDeps) {
@@ -1056,6 +1095,31 @@
   ASSERT_EQ("cannot make progress due to previous errors", err);
 }
 
+TEST_F(BuildTest, PoolEdgesReadyButNotWanted) {
+  fs_.Create("x", "");
+
+  const char* manifest =
+    "pool some_pool\n"
+    "  depth = 4\n"
+    "rule touch\n"
+    "  command = touch $out\n"
+    "  pool = some_pool\n"
+    "rule cc\n"
+    "  command = touch grit\n"
+    "\n"
+    "build B.d.stamp: cc | x\n"
+    "build C.stamp: touch B.d.stamp\n"
+    "build final.stamp: touch || C.stamp\n";
+
+  RebuildTarget("final.stamp", manifest);
+
+  fs_.RemoveFile("B.d.stamp");
+
+  State save_state;
+  RebuildTarget("final.stamp", manifest, NULL, NULL, &save_state);
+  EXPECT_GE(save_state.LookupPool("some_pool")->current_use(), 0);
+}
+
 struct BuildWithLogTest : public BuildTest {
   BuildWithLogTest() {
     builder_.SetBuildLog(&build_log_);
@@ -1414,7 +1478,7 @@
   ASSERT_EQ("Another very long command", fs_.files_["out.rsp"].contents);
 }
 
-// Test that contens of the RSP file behaves like a regular part of
+// Test that contents of the RSP file behaves like a regular part of
 // command line, i.e. triggers a rebuild if changed
 TEST_F(BuildWithLogTest, RspFileCmdLineChange) {
   ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
@@ -1484,7 +1548,7 @@
   EXPECT_FALSE(builder_.Build(&err));
   EXPECT_EQ("interrupted by user", err);
   builder_.Cleanup();
-  EXPECT_GT(fs_.Stat("out1"), 0);
+  EXPECT_GT(fs_.Stat("out1", &err), 0);
   err = "";
 
   // A touched output of an interrupted command should be deleted.
@@ -1493,7 +1557,22 @@
   EXPECT_FALSE(builder_.Build(&err));
   EXPECT_EQ("interrupted by user", err);
   builder_.Cleanup();
-  EXPECT_EQ(0, fs_.Stat("out2"));
+  EXPECT_EQ(0, fs_.Stat("out2", &err));
+}
+
+TEST_F(BuildTest, StatFailureAbortsBuild) {
+  const string kTooLongToStat(400, 'i');
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+("build " + kTooLongToStat + ": cat " + kTooLongToStat + "\n").c_str()));
+  // Also cyclic, for good measure.
+
+  // This simulates a stat failure:
+  fs_.files_[kTooLongToStat].mtime = -1;
+  fs_.files_[kTooLongToStat].stat_error = "stat failed";
+
+  string err;
+  EXPECT_FALSE(builder_.AddTarget(kTooLongToStat, &err));
+  EXPECT_EQ("stat failed", err);
 }
 
 TEST_F(BuildTest, PhonyWithNoInputs) {
@@ -1613,7 +1692,7 @@
     EXPECT_EQ("", err);
 
     // The deps file should have been removed.
-    EXPECT_EQ(0, fs_.Stat("in1.d"));
+    EXPECT_EQ(0, fs_.Stat("in1.d", &err));
     // Recreate it for the next step.
     fs_.Create("in1.d", "out: in2");
     deps_log.Close();
@@ -1693,7 +1772,7 @@
   fs_.Create("out", "");
 
   // The deps file should have been removed, so no need to timestamp it.
-  EXPECT_EQ(0, fs_.Stat("in1.d"));
+  EXPECT_EQ(0, fs_.Stat("in1.d", &err));
 
   {
     State state;
@@ -2043,6 +2122,23 @@
   ASSERT_EQ(0u, command_runner_.commands_ran_.size());
 }
 
+TEST_F(BuildTest, WrongOutputInDepfileCausesRebuild) {
+  string err;
+  const char* manifest =
+"rule cc\n"
+"  command = cc $in\n"
+"  depfile = $out.d\n"
+"build foo.o: cc foo.c\n";
+
+  fs_.Create("foo.c", "");
+  fs_.Create("foo.o", "");
+  fs_.Create("header.h", "");
+  fs_.Create("foo.o.d", "bar.o.d: header.h\n");
+
+  RebuildTarget("foo.o", manifest, "build_log", "ninja_deps");
+  ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+}
+
 TEST_F(BuildTest, Console) {
   ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
 "rule console\n"
diff --git a/src/clean.cc b/src/clean.cc
index 98c638c..1d6ba9e 100644
--- a/src/clean.cc
+++ b/src/clean.cc
@@ -49,7 +49,11 @@
 }
 
 bool Cleaner::FileExists(const string& path) {
-  return disk_interface_->Stat(path) > 0;
+  string err;
+  TimeStamp mtime = disk_interface_->Stat(path, &err);
+  if (mtime == -1)
+    Error("%s", err.c_str());
+  return mtime > 0;  // Treat Stat() errors as "file does not exist".
 }
 
 void Cleaner::Report(const string& path) {
@@ -220,7 +224,7 @@
   assert(rule);
 
   Reset();
-  const Rule* r = state_->LookupRule(rule);
+  const Rule* r = state_->bindings_.LookupRule(rule);
   if (r) {
     CleanRule(r);
   } else {
@@ -237,7 +241,7 @@
   PrintHeader();
   for (int i = 0; i < rule_count; ++i) {
     const char* rule_name = rules[i];
-    const Rule* rule = state_->LookupRule(rule_name);
+    const Rule* rule = state_->bindings_.LookupRule(rule_name);
     if (rule) {
       if (IsVerbose())
         printf("Rule %s\n", rule_name);
diff --git a/src/clean_test.cc b/src/clean_test.cc
index 5869bbb..395343b 100644
--- a/src/clean_test.cc
+++ b/src/clean_test.cc
@@ -44,10 +44,11 @@
   EXPECT_EQ(4u, fs_.files_removed_.size());
 
   // Check they are removed.
-  EXPECT_EQ(0, fs_.Stat("in1"));
-  EXPECT_EQ(0, fs_.Stat("out1"));
-  EXPECT_EQ(0, fs_.Stat("in2"));
-  EXPECT_EQ(0, fs_.Stat("out2"));
+  string err;
+  EXPECT_EQ(0, fs_.Stat("in1", &err));
+  EXPECT_EQ(0, fs_.Stat("out1", &err));
+  EXPECT_EQ(0, fs_.Stat("in2", &err));
+  EXPECT_EQ(0, fs_.Stat("out2", &err));
   fs_.files_removed_.clear();
 
   EXPECT_EQ(0, cleaner.CleanAll());
@@ -75,10 +76,11 @@
   EXPECT_EQ(0u, fs_.files_removed_.size());
 
   // Check they are not removed.
-  EXPECT_NE(0, fs_.Stat("in1"));
-  EXPECT_NE(0, fs_.Stat("out1"));
-  EXPECT_NE(0, fs_.Stat("in2"));
-  EXPECT_NE(0, fs_.Stat("out2"));
+  string err;
+  EXPECT_LT(0, fs_.Stat("in1", &err));
+  EXPECT_LT(0, fs_.Stat("out1", &err));
+  EXPECT_LT(0, fs_.Stat("in2", &err));
+  EXPECT_LT(0, fs_.Stat("out2", &err));
   fs_.files_removed_.clear();
 
   EXPECT_EQ(0, cleaner.CleanAll());
@@ -105,10 +107,11 @@
   EXPECT_EQ(2u, fs_.files_removed_.size());
 
   // Check they are removed.
-  EXPECT_EQ(0, fs_.Stat("in1"));
-  EXPECT_EQ(0, fs_.Stat("out1"));
-  EXPECT_NE(0, fs_.Stat("in2"));
-  EXPECT_NE(0, fs_.Stat("out2"));
+  string err;
+  EXPECT_EQ(0, fs_.Stat("in1", &err));
+  EXPECT_EQ(0, fs_.Stat("out1", &err));
+  EXPECT_LT(0, fs_.Stat("in2", &err));
+  EXPECT_LT(0, fs_.Stat("out2", &err));
   fs_.files_removed_.clear();
 
   ASSERT_EQ(0, cleaner.CleanTarget("out1"));
@@ -135,11 +138,12 @@
   EXPECT_EQ(2, cleaner.cleaned_files_count());
   EXPECT_EQ(0u, fs_.files_removed_.size());
 
-  // Check they are removed.
-  EXPECT_NE(0, fs_.Stat("in1"));
-  EXPECT_NE(0, fs_.Stat("out1"));
-  EXPECT_NE(0, fs_.Stat("in2"));
-  EXPECT_NE(0, fs_.Stat("out2"));
+  // Check they are not removed.
+  string err;
+  EXPECT_LT(0, fs_.Stat("in1", &err));
+  EXPECT_LT(0, fs_.Stat("out1", &err));
+  EXPECT_LT(0, fs_.Stat("in2", &err));
+  EXPECT_LT(0, fs_.Stat("out2", &err));
   fs_.files_removed_.clear();
 
   ASSERT_EQ(0, cleaner.CleanTarget("out1"));
@@ -168,10 +172,11 @@
   EXPECT_EQ(2u, fs_.files_removed_.size());
 
   // Check they are removed.
-  EXPECT_EQ(0, fs_.Stat("in1"));
-  EXPECT_NE(0, fs_.Stat("out1"));
-  EXPECT_EQ(0, fs_.Stat("in2"));
-  EXPECT_NE(0, fs_.Stat("out2"));
+  string err;
+  EXPECT_EQ(0, fs_.Stat("in1", &err));
+  EXPECT_LT(0, fs_.Stat("out1", &err));
+  EXPECT_EQ(0, fs_.Stat("in2", &err));
+  EXPECT_LT(0, fs_.Stat("out2", &err));
   fs_.files_removed_.clear();
 
   ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
@@ -200,11 +205,12 @@
   EXPECT_EQ(2, cleaner.cleaned_files_count());
   EXPECT_EQ(0u, fs_.files_removed_.size());
 
-  // Check they are removed.
-  EXPECT_NE(0, fs_.Stat("in1"));
-  EXPECT_NE(0, fs_.Stat("out1"));
-  EXPECT_NE(0, fs_.Stat("in2"));
-  EXPECT_NE(0, fs_.Stat("out2"));
+  // Check they are not removed.
+  string err;
+  EXPECT_LT(0, fs_.Stat("in1", &err));
+  EXPECT_LT(0, fs_.Stat("out1", &err));
+  EXPECT_LT(0, fs_.Stat("in2", &err));
+  EXPECT_LT(0, fs_.Stat("out2", &err));
   fs_.files_removed_.clear();
 
   ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
@@ -328,12 +334,13 @@
   EXPECT_EQ(6u, fs_.files_removed_.size());
 
   // Check they are removed.
-  EXPECT_EQ(0, fs_.Stat("in1"));
-  EXPECT_EQ(0, fs_.Stat("out1"));
-  EXPECT_EQ(0, fs_.Stat("in2"));
-  EXPECT_EQ(0, fs_.Stat("out2"));
-  EXPECT_EQ(0, fs_.Stat("in2.rsp"));
-  EXPECT_EQ(0, fs_.Stat("out2.rsp"));
+  string err;
+  EXPECT_EQ(0, fs_.Stat("in1", &err));
+  EXPECT_EQ(0, fs_.Stat("out1", &err));
+  EXPECT_EQ(0, fs_.Stat("in2", &err));
+  EXPECT_EQ(0, fs_.Stat("out2", &err));
+  EXPECT_EQ(0, fs_.Stat("in2.rsp", &err));
+  EXPECT_EQ(0, fs_.Stat("out2.rsp", &err));
 }
 
 TEST_F(CleanTest, CleanFailure) {
@@ -345,6 +352,7 @@
 }
 
 TEST_F(CleanTest, CleanPhony) {
+  string err;
   ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
 "build phony: phony t1 t2\n"
 "build t1: cat\n"
@@ -358,7 +366,7 @@
   Cleaner cleaner(&state_, config_, &fs_);
   EXPECT_EQ(0, cleaner.CleanAll());
   EXPECT_EQ(2, cleaner.cleaned_files_count());
-  EXPECT_NE(0, fs_.Stat("phony"));
+  EXPECT_LT(0, fs_.Stat("phony", &err));
 
   fs_.Create("t1", "");
   fs_.Create("t2", "");
@@ -366,7 +374,7 @@
   // Check that CleanTarget does not remove "phony".
   EXPECT_EQ(0, cleaner.CleanTarget("phony"));
   EXPECT_EQ(2, cleaner.cleaned_files_count());
-  EXPECT_NE(0, fs_.Stat("phony"));
+  EXPECT_LT(0, fs_.Stat("phony", &err));
 }
 
 TEST_F(CleanTest, CleanDepFileAndRspFileWithSpaces) {
@@ -391,8 +399,9 @@
   EXPECT_EQ(4, cleaner.cleaned_files_count());
   EXPECT_EQ(4u, fs_.files_removed_.size());
 
-  EXPECT_EQ(0, fs_.Stat("out 1"));
-  EXPECT_EQ(0, fs_.Stat("out 2"));
-  EXPECT_EQ(0, fs_.Stat("out 1.d"));
-  EXPECT_EQ(0, fs_.Stat("out 2.rsp"));
+  string err;
+  EXPECT_EQ(0, fs_.Stat("out 1", &err));
+  EXPECT_EQ(0, fs_.Stat("out 2", &err));
+  EXPECT_EQ(0, fs_.Stat("out 1.d", &err));
+  EXPECT_EQ(0, fs_.Stat("out 2.rsp", &err));
 }
diff --git a/src/depfile_parser.cc b/src/depfile_parser.cc
index 4ca3943..7268f31 100644
--- a/src/depfile_parser.cc
+++ b/src/depfile_parser.cc
@@ -230,5 +230,9 @@
       return false;
     }
   }
+  if (parsing_targets) {
+    *err = "expected ':' in depfile";
+    return false;
+  }
   return true;
 }
diff --git a/src/depfile_parser.in.cc b/src/depfile_parser.in.cc
index b59baf0..deaee5b 100644
--- a/src/depfile_parser.in.cc
+++ b/src/depfile_parser.in.cc
@@ -112,5 +112,9 @@
       return false;
     }
   }
+  if (parsing_targets) {
+    *err = "expected ':' in depfile";
+    return false;
+  }
   return true;
 }
diff --git a/src/depfile_parser_test.cc b/src/depfile_parser_test.cc
index e67ef79..8b57a1e 100644
--- a/src/depfile_parser_test.cc
+++ b/src/depfile_parser_test.cc
@@ -106,7 +106,7 @@
   // it through.
   string err;
   EXPECT_TRUE(Parse(
-"\\!\\@\\#$$\\%\\^\\&\\\\",
+"\\!\\@\\#$$\\%\\^\\&\\\\:",
       &err));
   ASSERT_EQ("", err);
   EXPECT_EQ("\\!\\@#$\\%\\^\\&\\",
diff --git a/src/deps_log.cc b/src/deps_log.cc
index fc46497..89c6023 100644
--- a/src/deps_log.cc
+++ b/src/deps_log.cc
@@ -239,13 +239,13 @@
       if (buf[path_size - 1] == '\0') --path_size;
       if (buf[path_size - 1] == '\0') --path_size;
       if (buf[path_size - 1] == '\0') --path_size;
-      StringPiece path(buf, path_size);
+      StringPiece subpath(buf, path_size);
       // It is not necessary to pass in a correct slash_bits here. It will
       // either be a Node that's in the manifest (in which case it will already
       // have a correct slash_bits that GetNode will look up), or it is an
       // implicit dependency from a .d which does not affect the build command
       // (and so need not have its slashes maintained).
-      Node* node = state->GetNode(path, 0);
+      Node* node = state->GetNode(subpath, 0);
 
       // Check that the expected index matches the actual index. This can only
       // happen if two ninja processes write to the same deps log concurrently.
@@ -275,7 +275,7 @@
     }
     fclose(f);
 
-    if (!Truncate(path.c_str(), offset, err))
+    if (!Truncate(path, offset, err))
       return false;
 
     // The truncate succeeded; we'll just report the load error as a
@@ -307,8 +307,6 @@
 
 bool DepsLog::Recompact(const string& path, string* err) {
   METRIC_RECORD(".ninja_deps recompact");
-  if (!quiet_)
-    printf("Recompacting deps...\n");
 
   Close();
   string temp_path = path + ".recompact";
diff --git a/src/deps_log.h b/src/deps_log.h
index 9b81bc1..cec0257 100644
--- a/src/deps_log.h
+++ b/src/deps_log.h
@@ -64,7 +64,7 @@
 /// wins, allowing updates to just be appended to the file.  A separate
 /// repacking step can run occasionally to remove dead records.
 struct DepsLog {
-  DepsLog() : needs_recompaction_(false), quiet_(false), file_(NULL) {}
+  DepsLog() : needs_recompaction_(false), file_(NULL) {}
   ~DepsLog();
 
   // Writing (build-time) interface.
@@ -87,7 +87,6 @@
 
   /// Rewrite the known log entries, throwing away old data.
   bool Recompact(const string& path, string* err);
-  void set_quiet(bool quiet) { quiet_ = quiet; }
 
   /// Returns if the deps entry for a node is still reachable from the manifest.
   ///
@@ -109,7 +108,6 @@
   bool RecordId(Node* node);
 
   bool needs_recompaction_;
-  bool quiet_;
   FILE* file_;
 
   /// Maps id -> Node.
diff --git a/src/deps_log_test.cc b/src/deps_log_test.cc
index ac2b315..cab06fb 100644
--- a/src/deps_log_test.cc
+++ b/src/deps_log_test.cc
@@ -252,7 +252,6 @@
     ASSERT_EQ("foo.h", deps->nodes[0]->path());
     ASSERT_EQ("baz.h", deps->nodes[1]->path());
 
-    log.set_quiet(true);
     ASSERT_TRUE(log.Recompact(kTestFilename, &err));
 
     // The in-memory deps graph should still be valid after recompaction.
@@ -302,7 +301,6 @@
     ASSERT_EQ("foo.h", deps->nodes[0]->path());
     ASSERT_EQ("baz.h", deps->nodes[1]->path());
 
-    log.set_quiet(true);
     ASSERT_TRUE(log.Recompact(kTestFilename, &err));
 
     // The previous entries should have been removed.
diff --git a/src/disk_interface.cc b/src/disk_interface.cc
index 9810708..70282c0 100644
--- a/src/disk_interface.cc
+++ b/src/disk_interface.cc
@@ -23,6 +23,7 @@
 #include <sys/types.h>
 
 #ifdef _WIN32
+#include <sstream>
 #include <windows.h>
 #include <direct.h>  // _mkdir
 #endif
@@ -67,16 +68,13 @@
   return (TimeStamp)mtime;
 }
 
-TimeStamp StatSingleFile(const string& path, bool quiet) {
+TimeStamp StatSingleFile(const string& path, string* err) {
   WIN32_FILE_ATTRIBUTE_DATA attrs;
   if (!GetFileAttributesEx(path.c_str(), GetFileExInfoStandard, &attrs)) {
-    DWORD err = GetLastError();
-    if (err == ERROR_FILE_NOT_FOUND || err == ERROR_PATH_NOT_FOUND)
+    DWORD win_err = GetLastError();
+    if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND)
       return 0;
-    if (!quiet) {
-      Error("GetFileAttributesEx(%s): %s", path.c_str(),
-            GetLastErrorString().c_str());
-    }
+    *err = "GetFileAttributesEx(" + path + "): " + GetLastErrorString();
     return -1;
   }
   return TimeStampFromFileTime(attrs.ftLastWriteTime);
@@ -98,7 +96,7 @@
 #endif
 
 bool StatAllFilesInDir(const string& dir, map<string, TimeStamp>* stamps,
-                       bool quiet) {
+                       string* err) {
   // FindExInfoBasic is 30% faster than FindExInfoStandard.
   static bool can_use_basic_info = IsWindows7OrLater();
   // This is not in earlier SDKs.
@@ -111,13 +109,10 @@
                                         FindExSearchNameMatch, NULL, 0);
 
   if (find_handle == INVALID_HANDLE_VALUE) {
-    DWORD err = GetLastError();
-    if (err == ERROR_FILE_NOT_FOUND || err == ERROR_PATH_NOT_FOUND)
+    DWORD win_err = GetLastError();
+    if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND)
       return true;
-    if (!quiet) {
-      Error("FindFirstFileExA(%s): %s", dir.c_str(),
-            GetLastErrorString().c_str());
-    }
+    *err = "FindFirstFileExA(" + dir + "): " + GetLastErrorString();
     return false;
   }
   do {
@@ -139,9 +134,12 @@
   string dir = DirName(path);
   if (dir.empty())
     return true;  // Reached root; assume it's there.
-  TimeStamp mtime = Stat(dir);
-  if (mtime < 0)
-    return false;  // Error.
+  string err;
+  TimeStamp mtime = Stat(dir, &err);
+  if (mtime < 0) {
+    Error("%s", err.c_str());
+    return false;
+  }
   if (mtime > 0)
     return true;  // Exists already; we're done.
 
@@ -154,19 +152,19 @@
 
 // RealDiskInterface -----------------------------------------------------------
 
-TimeStamp RealDiskInterface::Stat(const string& path) const {
+TimeStamp RealDiskInterface::Stat(const string& path, string* err) const {
 #ifdef _WIN32
   // MSDN: "Naming Files, Paths, and Namespaces"
   // http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
   if (!path.empty() && path[0] != '\\' && path.size() > MAX_PATH) {
-    if (!quiet_) {
-      Error("Stat(%s): Filename longer than %i characters",
-            path.c_str(), MAX_PATH);
-    }
+    ostringstream err_stream;
+    err_stream << "Stat(" << path << "): Filename longer than " << MAX_PATH
+               << " characters";
+    *err = err_stream.str();
     return -1;
   }
   if (!use_cache_)
-    return StatSingleFile(path, quiet_);
+    return StatSingleFile(path, err);
 
   string dir = DirName(path);
   string base(path.substr(dir.size() ? dir.size() + 1 : 0));
@@ -177,7 +175,7 @@
   Cache::iterator ci = cache_.find(dir);
   if (ci == cache_.end()) {
     ci = cache_.insert(make_pair(dir, DirCache())).first;
-    if (!StatAllFilesInDir(dir.empty() ? "." : dir, &ci->second, quiet_)) {
+    if (!StatAllFilesInDir(dir.empty() ? "." : dir, &ci->second, err)) {
       cache_.erase(ci);
       return -1;
     }
@@ -189,9 +187,7 @@
   if (stat(path.c_str(), &st) < 0) {
     if (errno == ENOENT || errno == ENOTDIR)
       return 0;
-    if (!quiet_) {
-      Error("stat(%s): %s", path.c_str(), strerror(errno));
-    }
+    *err = "stat(" + path + "): " + strerror(errno);
     return -1;
   }
   return st.st_mtime;
diff --git a/src/disk_interface.h b/src/disk_interface.h
index a13bced..b61d192 100644
--- a/src/disk_interface.h
+++ b/src/disk_interface.h
@@ -30,7 +30,7 @@
 
   /// stat() a file, returning the mtime, or 0 if missing and -1 on
   /// other errors.
-  virtual TimeStamp Stat(const string& path) const = 0;
+  virtual TimeStamp Stat(const string& path, string* err) const = 0;
 
   /// Create a directory, returning false on failure.
   virtual bool MakeDir(const string& path) = 0;
@@ -56,21 +56,18 @@
 
 /// Implementation of DiskInterface that actually hits the disk.
 struct RealDiskInterface : public DiskInterface {
-  RealDiskInterface() : quiet_(false)
+  RealDiskInterface()
 #ifdef _WIN32
-                      , use_cache_(false)
+                      : use_cache_(false)
 #endif
                       {}
   virtual ~RealDiskInterface() {}
-  virtual TimeStamp Stat(const string& path) const;
+  virtual TimeStamp Stat(const string& path, string* err) const;
   virtual bool MakeDir(const string& path);
   virtual bool WriteFile(const string& path, const string& contents);
   virtual string ReadFile(const string& path, string* err);
   virtual int RemoveFile(const string& path);
 
-  /// Whether to print on errors.  Used to make a test quieter.
-  bool quiet_;
-
   /// Whether stat information can be cached.  Only has an effect on Windows.
   void AllowStatCache(bool allow);
 
diff --git a/src/disk_interface_test.cc b/src/disk_interface_test.cc
index 05d509c..9d210b4 100644
--- a/src/disk_interface_test.cc
+++ b/src/disk_interface_test.cc
@@ -47,49 +47,64 @@
 };
 
 TEST_F(DiskInterfaceTest, StatMissingFile) {
-  EXPECT_EQ(0, disk_.Stat("nosuchfile"));
+  string err;
+  EXPECT_EQ(0, disk_.Stat("nosuchfile", &err));
+  EXPECT_EQ("", err);
 
   // On Windows, the errno for a file in a nonexistent directory
   // is different.
-  EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile"));
+  EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile", &err));
+  EXPECT_EQ("", err);
 
   // On POSIX systems, the errno is different if a component of the
   // path prefix is not a directory.
   ASSERT_TRUE(Touch("notadir"));
-  EXPECT_EQ(0, disk_.Stat("notadir/nosuchfile"));
+  EXPECT_EQ(0, disk_.Stat("notadir/nosuchfile", &err));
+  EXPECT_EQ("", err);
 }
 
 TEST_F(DiskInterfaceTest, StatBadPath) {
-  disk_.quiet_ = true;
+  string err;
 #ifdef _WIN32
   string bad_path("cc:\\foo");
-  EXPECT_EQ(-1, disk_.Stat(bad_path));
+  EXPECT_EQ(-1, disk_.Stat(bad_path, &err));
+  EXPECT_NE("", err);
 #else
   string too_long_name(512, 'x');
-  EXPECT_EQ(-1, disk_.Stat(too_long_name));
+  EXPECT_EQ(-1, disk_.Stat(too_long_name, &err));
+  EXPECT_NE("", err);
 #endif
-  disk_.quiet_ = false;
 }
 
 TEST_F(DiskInterfaceTest, StatExistingFile) {
+  string err;
   ASSERT_TRUE(Touch("file"));
-  EXPECT_GT(disk_.Stat("file"), 1);
+  EXPECT_GT(disk_.Stat("file", &err), 1);
+  EXPECT_EQ("", err);
 }
 
 TEST_F(DiskInterfaceTest, StatExistingDir) {
+  string err;
   ASSERT_TRUE(disk_.MakeDir("subdir"));
   ASSERT_TRUE(disk_.MakeDir("subdir/subsubdir"));
-  EXPECT_GT(disk_.Stat("."), 1);
-  EXPECT_GT(disk_.Stat("subdir"), 1);
-  EXPECT_GT(disk_.Stat("subdir/subsubdir"), 1);
+  EXPECT_GT(disk_.Stat(".", &err), 1);
+  EXPECT_EQ("", err);
+  EXPECT_GT(disk_.Stat("subdir", &err), 1);
+  EXPECT_EQ("", err);
+  EXPECT_GT(disk_.Stat("subdir/subsubdir", &err), 1);
+  EXPECT_EQ("", err);
 
-  EXPECT_EQ(disk_.Stat("subdir"), disk_.Stat("subdir/."));
-  EXPECT_EQ(disk_.Stat("subdir"), disk_.Stat("subdir/subsubdir/.."));
-  EXPECT_EQ(disk_.Stat("subdir/subsubdir"), disk_.Stat("subdir/subsubdir/."));
+  EXPECT_EQ(disk_.Stat("subdir", &err),
+            disk_.Stat("subdir/.", &err));
+  EXPECT_EQ(disk_.Stat("subdir", &err),
+            disk_.Stat("subdir/subsubdir/..", &err));
+  EXPECT_EQ(disk_.Stat("subdir/subsubdir", &err),
+            disk_.Stat("subdir/subsubdir/.", &err));
 }
 
 #ifdef _WIN32
 TEST_F(DiskInterfaceTest, StatCache) {
+  string err;
   disk_.AllowStatCache(true);
 
   ASSERT_TRUE(Touch("file1"));
@@ -100,27 +115,43 @@
   ASSERT_TRUE(Touch("subdir\\SUBFILE2"));
   ASSERT_TRUE(Touch("subdir\\SUBFILE3"));
 
-  EXPECT_GT(disk_.Stat("FIle1"), 1);
-  EXPECT_GT(disk_.Stat("file1"), 1);
+  EXPECT_GT(disk_.Stat("FIle1", &err), 1);
+  EXPECT_EQ("", err);
+  EXPECT_GT(disk_.Stat("file1", &err), 1);
+  EXPECT_EQ("", err);
 
-  EXPECT_GT(disk_.Stat("subdir/subfile2"), 1);
-  EXPECT_GT(disk_.Stat("sUbdir\\suBFile1"), 1);
+  EXPECT_GT(disk_.Stat("subdir/subfile2", &err), 1);
+  EXPECT_EQ("", err);
+  EXPECT_GT(disk_.Stat("sUbdir\\suBFile1", &err), 1);
+  EXPECT_EQ("", err);
 
-  EXPECT_GT(disk_.Stat("."), 1);
-  EXPECT_GT(disk_.Stat("subdir"), 1);
-  EXPECT_GT(disk_.Stat("subdir/subsubdir"), 1);
+  EXPECT_GT(disk_.Stat(".", &err), 1);
+  EXPECT_EQ("", err);
+  EXPECT_GT(disk_.Stat("subdir", &err), 1);
+  EXPECT_EQ("", err);
+  EXPECT_GT(disk_.Stat("subdir/subsubdir", &err), 1);
+  EXPECT_EQ("", err);
 
-  EXPECT_EQ(disk_.Stat("subdir"), disk_.Stat("subdir/."));
-  EXPECT_EQ(disk_.Stat("subdir"), disk_.Stat("subdir/subsubdir/.."));
-  EXPECT_EQ(disk_.Stat("subdir/subsubdir"), disk_.Stat("subdir/subsubdir/."));
+  EXPECT_EQ(disk_.Stat("subdir", &err),
+            disk_.Stat("subdir/.", &err));
+  EXPECT_EQ("", err);
+  EXPECT_EQ(disk_.Stat("subdir", &err),
+            disk_.Stat("subdir/subsubdir/..", &err));
+  EXPECT_EQ("", err);
+  EXPECT_EQ(disk_.Stat("subdir/subsubdir", &err),
+            disk_.Stat("subdir/subsubdir/.", &err));
+  EXPECT_EQ("", err);
 
   // Test error cases.
-  disk_.quiet_ = true;
   string bad_path("cc:\\foo");
-  EXPECT_EQ(-1, disk_.Stat(bad_path));
-  EXPECT_EQ(-1, disk_.Stat(bad_path));
-  EXPECT_EQ(0, disk_.Stat("nosuchfile"));
-  EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile"));
+  EXPECT_EQ(-1, disk_.Stat(bad_path, &err));
+  EXPECT_NE("", err); err.clear();
+  EXPECT_EQ(-1, disk_.Stat(bad_path, &err));
+  EXPECT_NE("", err); err.clear();
+  EXPECT_EQ(0, disk_.Stat("nosuchfile", &err));
+  EXPECT_EQ("", err);
+  EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile", &err));
+  EXPECT_EQ("", err);
 }
 #endif
 
@@ -168,7 +199,7 @@
   StatTest() : scan_(&state_, NULL, NULL, this) {}
 
   // DiskInterface implementation.
-  virtual TimeStamp Stat(const string& path) const;
+  virtual TimeStamp Stat(const string& path, string* err) const;
   virtual bool WriteFile(const string& path, const string& contents) {
     assert(false);
     return true;
@@ -191,7 +222,7 @@
   mutable vector<string> stats_;
 };
 
-TimeStamp StatTest::Stat(const string& path) const {
+TimeStamp StatTest::Stat(const string& path, string* err) const {
   stats_.push_back(path);
   map<string, TimeStamp>::const_iterator i = mtimes_.find(path);
   if (i == mtimes_.end())
@@ -204,7 +235,9 @@
 "build out: cat in\n"));
 
   Node* out = GetNode("out");
-  out->Stat(this);
+  string err;
+  EXPECT_TRUE(out->Stat(this, &err));
+  EXPECT_EQ("", err);
   ASSERT_EQ(1u, stats_.size());
   scan_.RecomputeDirty(out->in_edge(), NULL);
   ASSERT_EQ(2u, stats_.size());
@@ -218,7 +251,9 @@
 "build mid: cat in\n"));
 
   Node* out = GetNode("out");
-  out->Stat(this);
+  string err;
+  EXPECT_TRUE(out->Stat(this, &err));
+  EXPECT_EQ("", err);
   ASSERT_EQ(1u, stats_.size());
   scan_.RecomputeDirty(out->in_edge(), NULL);
   ASSERT_EQ(3u, stats_.size());
@@ -236,7 +271,9 @@
 "build mid2: cat in21 in22\n"));
 
   Node* out = GetNode("out");
-  out->Stat(this);
+  string err;
+  EXPECT_TRUE(out->Stat(this, &err));
+  EXPECT_EQ("", err);
   ASSERT_EQ(1u, stats_.size());
   scan_.RecomputeDirty(out->in_edge(), NULL);
   ASSERT_EQ(1u + 6u, stats_.size());
@@ -255,7 +292,9 @@
   mtimes_["out"] = 1;
 
   Node* out = GetNode("out");
-  out->Stat(this);
+  string err;
+  EXPECT_TRUE(out->Stat(this, &err));
+  EXPECT_EQ("", err);
   ASSERT_EQ(1u, stats_.size());
   scan_.RecomputeDirty(out->in_edge(), NULL);
   ASSERT_FALSE(GetNode("in")->dirty());
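
The tests above pin down the revised Stat() contract: a return of -1 is a hard
error whose message is placed in *err, 0 means the file does not exist, and any
positive value is the mtime. A minimal caller sketch under that contract (the
helper name is hypothetical, not part of this change):

    #include <stdio.h>
    #include <string>

    #include "disk_interface.h"

    // Hypothetical helper illustrating the three-way result of Stat().
    bool PrintMtime(DiskInterface* disk, const std::string& path) {
      std::string err;
      TimeStamp mtime = disk->Stat(path, &err);
      if (mtime == -1) {
        fprintf(stderr, "ninja: %s\n", err.c_str());  // hard error, see |err|
        return false;
      }
      if (mtime == 0)
        printf("%s: missing\n", path.c_str());        // file does not exist
      else
        printf("%s: mtime %d\n", path.c_str(), (int)mtime);
      return true;
    }
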
diff --git a/src/edit_distance.cc b/src/edit_distance.cc
index 9553c6e..a6719d3 100644
--- a/src/edit_distance.cc
+++ b/src/edit_distance.cc
@@ -28,7 +28,7 @@
   //   http://en.wikipedia.org/wiki/Levenshtein_distance
   //
   // Although the algorithm is typically described using an m x n
-  // array, only two rows are used at a time, so this implemenation
+  // array, only two rows are used at a time, so this implementation
   // just keeps two separate vectors for those two rows.
   int m = s1.len_;
   int n = s2.len_;
diff --git a/src/eval_env.cc b/src/eval_env.cc
index 834b7e1..e991d21 100644
--- a/src/eval_env.cc
+++ b/src/eval_env.cc
@@ -12,6 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include <assert.h>
+
 #include "eval_env.h"
 
 string BindingEnv::LookupVariable(const string& var) {
@@ -27,6 +29,55 @@
   bindings_[key] = val;
 }
 
+void BindingEnv::AddRule(const Rule* rule) {
+  assert(LookupRuleCurrentScope(rule->name()) == NULL);
+  rules_[rule->name()] = rule;
+}
+
+const Rule* BindingEnv::LookupRuleCurrentScope(const string& rule_name) {
+  map<string, const Rule*>::iterator i = rules_.find(rule_name);
+  if (i == rules_.end())
+    return NULL;
+  return i->second;
+}
+
+const Rule* BindingEnv::LookupRule(const string& rule_name) {
+  map<string, const Rule*>::iterator i = rules_.find(rule_name);
+  if (i != rules_.end())
+    return i->second;
+  if (parent_)
+    return parent_->LookupRule(rule_name);
+  return NULL;
+}
+
+void Rule::AddBinding(const string& key, const EvalString& val) {
+  bindings_[key] = val;
+}
+
+const EvalString* Rule::GetBinding(const string& key) const {
+  map<string, EvalString>::const_iterator i = bindings_.find(key);
+  if (i == bindings_.end())
+    return NULL;
+  return &i->second;
+}
+
+// static
+bool Rule::IsReservedBinding(const string& var) {
+  return var == "command" ||
+      var == "depfile" ||
+      var == "description" ||
+      var == "deps" ||
+      var == "generator" ||
+      var == "pool" ||
+      var == "restat" ||
+      var == "rspfile" ||
+      var == "rspfile_content";
+}
+
+const map<string, const Rule*>& BindingEnv::GetRules() const {
+  return rules_;
+}
+
 string BindingEnv::LookupWithFallback(const string& var,
                                       const EvalString* eval,
                                       Env* env) {
diff --git a/src/eval_env.h b/src/eval_env.h
index f3c959a..28c4d16 100644
--- a/src/eval_env.h
+++ b/src/eval_env.h
@@ -22,7 +22,7 @@
 
 #include "string_piece.h"
 
-struct EvalString;
+struct Rule;
 
 /// An interface for a scope for variable (e.g. "$foo") lookups.
 struct Env {
@@ -30,30 +30,6 @@
   virtual string LookupVariable(const string& var) = 0;
 };
 
-/// An Env which contains a mapping of variables to values
-/// as well as a pointer to a parent scope.
-struct BindingEnv : public Env {
-  BindingEnv() : parent_(NULL) {}
-  explicit BindingEnv(Env* parent) : parent_(parent) {}
-
-  virtual ~BindingEnv() {}
-  virtual string LookupVariable(const string& var);
-
-  void AddBinding(const string& key, const string& val);
-
-  /// This is tricky.  Edges want lookup scope to go in this order:
-  /// 1) value set on edge itself (edge_->env_)
-  /// 2) value set on rule, with expansion in the edge's scope
-  /// 3) value set on enclosing scope of edge (edge_->env_->parent_)
-  /// This function takes as parameters the necessary info to do (2).
-  string LookupWithFallback(const string& var, const EvalString* eval,
-                            Env* env);
-
-private:
-  map<string, string> bindings_;
-  Env* parent_;
-};
-
 /// A tokenized string that contains variable references.
 /// Can be evaluated relative to an Env.
 struct EvalString {
@@ -75,4 +51,55 @@
   TokenList parsed_;
 };
 
+/// An invokable build command and associated metadata (description, etc.).
+struct Rule {
+  explicit Rule(const string& name) : name_(name) {}
+
+  const string& name() const { return name_; }
+
+  typedef map<string, EvalString> Bindings;
+  void AddBinding(const string& key, const EvalString& val);
+
+  static bool IsReservedBinding(const string& var);
+
+  const EvalString* GetBinding(const string& key) const;
+
+ private:
+  // Allow the parsers to reach into this object and fill out its fields.
+  friend struct ManifestParser;
+
+  string name_;
+  map<string, EvalString> bindings_;
+};
+
+/// An Env which contains a mapping of variables to values
+/// as well as a pointer to a parent scope.
+struct BindingEnv : public Env {
+  BindingEnv() : parent_(NULL) {}
+  explicit BindingEnv(BindingEnv* parent) : parent_(parent) {}
+
+  virtual ~BindingEnv() {}
+  virtual string LookupVariable(const string& var);
+
+  void AddRule(const Rule* rule);
+  const Rule* LookupRule(const string& rule_name);
+  const Rule* LookupRuleCurrentScope(const string& rule_name);
+  const map<string, const Rule*>& GetRules() const;
+
+  void AddBinding(const string& key, const string& val);
+
+  /// This is tricky.  Edges want lookup scope to go in this order:
+  /// 1) value set on edge itself (edge_->env_)
+  /// 2) value set on rule, with expansion in the edge's scope
+  /// 3) value set on enclosing scope of edge (edge_->env_->parent_)
+  /// This function takes as parameters the necessary info to do (2).
+  string LookupWithFallback(const string& var, const EvalString* eval,
+                            Env* env);
+
+private:
+  map<string, string> bindings_;
+  map<string, const Rule*> rules_;
+  BindingEnv* parent_;
+};
+
 #endif  // NINJA_EVAL_ENV_H_
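
Since rules now hang off BindingEnv rather than State, rule lookup follows the
same parent-scope chain as variable lookup, which is what makes per-subninja
rule scoping work. A small sketch of the intended behavior, using only the
declarations above (the function name is illustrative):

    #include <assert.h>

    #include "eval_env.h"

    void RuleScopingSketch() {
      BindingEnv root;        // e.g. the scope of the top-level build.ninja
      BindingEnv sub(&root);  // e.g. the scope of a subninja

      Rule* cc = new Rule("cc");  // owned elsewhere; leaked in this sketch
      root.AddRule(cc);

      // Rules declared in an enclosing scope are visible from the subninja...
      assert(sub.LookupRule("cc") == cc);
      // ...but are not part of the subninja's own scope, so the parser's
      // "duplicate rule" check only fires for redefinitions in the same scope.
      assert(sub.LookupRuleCurrentScope("cc") == NULL);
    }
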
diff --git a/src/graph.cc b/src/graph.cc
index 2829669..355285c 100644
--- a/src/graph.cc
+++ b/src/graph.cc
@@ -27,34 +27,9 @@
 #include "state.h"
 #include "util.h"
 
-bool Node::Stat(DiskInterface* disk_interface) {
+bool Node::Stat(DiskInterface* disk_interface, string* err) {
   METRIC_RECORD("node stat");
-  mtime_ = disk_interface->Stat(path_);
-  return mtime_ > 0;
-}
-
-void Rule::AddBinding(const string& key, const EvalString& val) {
-  bindings_[key] = val;
-}
-
-const EvalString* Rule::GetBinding(const string& key) const {
-  map<string, EvalString>::const_iterator i = bindings_.find(key);
-  if (i == bindings_.end())
-    return NULL;
-  return &i->second;
-}
-
-// static
-bool Rule::IsReservedBinding(const string& var) {
-  return var == "command" ||
-      var == "depfile" ||
-      var == "description" ||
-      var == "deps" ||
-      var == "generator" ||
-      var == "pool" ||
-      var == "restat" ||
-      var == "rspfile" ||
-      var == "rspfile_content";
+  return (mtime_ = disk_interface->Stat(path_, err)) != -1;
 }
 
 bool DependencyScan::RecomputeDirty(Edge* edge, string* err) {
@@ -62,10 +37,25 @@
   edge->outputs_ready_ = true;
   edge->deps_missing_ = false;
 
+  // RecomputeDirty() recursively walks the graph following the input nodes
+  // of |edge| and the in_edges of these nodes.  It uses the stat state of each
+  // node to mark nodes as visited and doesn't traverse across nodes that have
+  // been visited already.  To make sure that every edge is visited only once
+  // (important because an edge's deps are loaded every time it's visited), mark
+  // all outputs of |edge| visited as a first step.  This ensures that edges
+  // with multiple inputs and outputs are visited only once, even in cyclic
+  // graphs.
+  for (vector<Node*>::iterator o = edge->outputs_.begin();
+       o != edge->outputs_.end(); ++o) {
+    if (!(*o)->StatIfNecessary(disk_interface_, err))
+      return false;
+  }
+
   if (!dep_loader_.LoadDeps(edge, err)) {
     if (!err->empty())
       return false;
     // Failed to load dependency info: rebuild to regenerate it.
+    // LoadDeps() did EXPLAIN() already, no need to do it here.
     dirty = edge->deps_missing_ = true;
   }
 
@@ -73,7 +63,9 @@
   Node* most_recent_input = NULL;
   for (vector<Node*>::iterator i = edge->inputs_.begin();
        i != edge->inputs_.end(); ++i) {
-    if ((*i)->StatIfNecessary(disk_interface_)) {
+    if (!(*i)->status_known()) {
+      if (!(*i)->StatIfNecessary(disk_interface_, err))
+        return false;
       if (Edge* in_edge = (*i)->in_edge()) {
         if (!RecomputeDirty(in_edge, err))
           return false;
@@ -108,15 +100,14 @@
   // We may also be dirty due to output state: missing outputs, out of
   // date outputs, etc.  Visit all outputs and determine whether they're dirty.
   if (!dirty)
-    dirty = RecomputeOutputsDirty(edge, most_recent_input);
+    if (!RecomputeOutputsDirty(edge, most_recent_input, &dirty, err))
+      return false;
 
-  // Finally, visit each output to mark off that we've visited it, and update
-  // their dirty state if necessary.
-  for (vector<Node*>::iterator i = edge->outputs_.begin();
-       i != edge->outputs_.end(); ++i) {
-    (*i)->StatIfNecessary(disk_interface_);
+  // Finally, visit each output and update its dirty state if necessary.
+  for (vector<Node*>::iterator o = edge->outputs_.begin();
+       o != edge->outputs_.end(); ++o) {
     if (dirty)
-      (*i)->MarkDirty();
+      (*o)->MarkDirty();
   }
 
   // If an edge is dirty, its outputs are normally not ready.  (It's
@@ -130,16 +121,19 @@
   return true;
 }
 
-bool DependencyScan::RecomputeOutputsDirty(Edge* edge,
-                                           Node* most_recent_input) {
-  string command = edge->EvaluateCommand(true);
-  for (vector<Node*>::iterator i = edge->outputs_.begin();
-       i != edge->outputs_.end(); ++i) {
-    (*i)->StatIfNecessary(disk_interface_);
-    if (RecomputeOutputDirty(edge, most_recent_input, command, *i))
+bool DependencyScan::RecomputeOutputsDirty(Edge* edge, Node* most_recent_input,
+                                           bool* outputs_dirty, string* err) {
+  string command = edge->EvaluateCommand(/*incl_rsp_file=*/true);
+  for (vector<Node*>::iterator o = edge->outputs_.begin();
+       o != edge->outputs_.end(); ++o) {
+    if (!(*o)->StatIfNecessary(disk_interface_, err))
+      return false;
+    if (RecomputeOutputDirty(edge, most_recent_input, command, *o)) {
+      *outputs_dirty = true;
       return true;
+    }
   }
-  return false;
+  return true;
 }
 
 bool DependencyScan::RecomputeOutputDirty(Edge* edge,
@@ -149,7 +143,12 @@
   if (edge->is_phony()) {
     // Phony edges don't write any output.  Outputs are only dirty if
     // there are no inputs and we're missing the output.
-    return edge->inputs_.empty() && !output->exists();
+    if (edge->inputs_.empty() && !output->exists()) {
+      EXPLAIN("output %s of phony edge with no inputs doesn't exist",
+              output->path().c_str());
+      return true;
+    }
+    return false;
   }
 
   BuildLog::LogEntry* entry = 0;
@@ -217,8 +216,8 @@
 struct EdgeEnv : public Env {
   enum EscapeKind { kShellEscape, kDoNotEscape };
 
-  explicit EdgeEnv(Edge* edge, EscapeKind escape)
-      : edge_(edge), escape_in_out_(escape) {}
+  EdgeEnv(Edge* edge, EscapeKind escape)
+      : edge_(edge), escape_in_out_(escape), recursive_(false) {}
   virtual string LookupVariable(const string& var);
 
   /// Given a span of Nodes, construct a list of paths suitable for a command
@@ -227,8 +226,11 @@
                       vector<Node*>::iterator end,
                       char sep);
 
+ private:
+  vector<string> lookups_;
   Edge* edge_;
   EscapeKind escape_in_out_;
+  bool recursive_;
 };
 
 string EdgeEnv::LookupVariable(const string& var) {
@@ -244,8 +246,25 @@
                         ' ');
   }
 
+  if (recursive_) {
+    vector<string>::const_iterator it;
+    if ((it = find(lookups_.begin(), lookups_.end(), var)) != lookups_.end()) {
+      string cycle;
+      for (; it != lookups_.end(); ++it)
+        cycle.append(*it + " -> ");
+      cycle.append(var);
+      Fatal(("cycle in rule variables: " + cycle).c_str());
+    }
+  }
+
   // See notes on BindingEnv::LookupWithFallback.
   const EvalString* eval = edge_->rule_->GetBinding(var);
+  if (recursive_ && eval)
+    lookups_.push_back(var);
+
+  // In practice, variables defined on rules never use another rule variable.
+  // For performance, only start checking for cycles after the first lookup.
+  recursive_ = true;
   return edge_->env_->LookupWithFallback(var, eval, this);
 }
 
@@ -398,12 +417,13 @@
                         &depfile.out_.len_, &unused, err))
     return false;
 
-  // Check that this depfile matches the edge's output.
+  // Check that this depfile matches the edge's output; if it doesn't, return
+  // false to mark the edge as dirty.
   Node* first_output = edge->outputs_[0];
   StringPiece opath = StringPiece(first_output->path());
   if (opath != depfile.out_) {
-    *err = "expected depfile '" + path + "' to mention '" +
-        first_output->path() + "', got '" + depfile.out_.AsString() + "'";
+    EXPLAIN("expected depfile '%s' to mention '%s', got '%s'", path.c_str(),
+            first_output->path().c_str(), depfile.out_.AsString().c_str());
     return false;
   }
 
@@ -439,7 +459,7 @@
 
   // Deps are invalid if the output is newer than the deps.
   if (output->mtime() > deps->mtime) {
-    EXPLAIN("stored deps info out of date for for '%s' (%d vs %d)",
+    EXPLAIN("stored deps info out of date for '%s' (%d vs %d)",
             output->path().c_str(), deps->mtime, output->mtime());
     return false;
   }
diff --git a/src/graph.h b/src/graph.h
index 9aada38..5f8d41a 100644
--- a/src/graph.h
+++ b/src/graph.h
@@ -41,15 +41,14 @@
         in_edge_(NULL),
         id_(-1) {}
 
-  /// Return true if the file exists (mtime_ got a value).
-  bool Stat(DiskInterface* disk_interface);
+  /// Return false on error.
+  bool Stat(DiskInterface* disk_interface, string* err);
 
-  /// Return true if we needed to stat.
-  bool StatIfNecessary(DiskInterface* disk_interface) {
+  /// Return false on error.
+  bool StatIfNecessary(DiskInterface* disk_interface, string* err) {
     if (status_known())
-      return false;
-    Stat(disk_interface);
-    return true;
+      return true;
+    return Stat(disk_interface, err);
   }
 
   /// Mark as not-yet-stat()ed and not dirty.
@@ -121,30 +120,10 @@
   int id_;
 };
 
-/// An invokable build command and associated metadata (description, etc.).
-struct Rule {
-  explicit Rule(const string& name) : name_(name) {}
-
-  const string& name() const { return name_; }
-
-  typedef map<string, EvalString> Bindings;
-  void AddBinding(const string& key, const EvalString& val);
-
-  static bool IsReservedBinding(const string& var);
-
-  const EvalString* GetBinding(const string& key) const;
-
- private:
-  // Allow the parsers to reach into this object and fill out its fields.
-  friend struct ManifestParser;
-
-  string name_;
-  map<string, EvalString> bindings_;
-};
-
 /// An edge in the dependency graph; links between Nodes using Rules.
 struct Edge {
-  Edge() : rule_(NULL), env_(NULL), outputs_ready_(false), deps_missing_(false),
+  Edge() : rule_(NULL), pool_(NULL), env_(NULL),
+           outputs_ready_(false), deps_missing_(false),
            implicit_deps_(0), order_only_deps_(0) {}
 
   /// Return true if all inputs' in-edges are ready.
@@ -257,9 +236,10 @@
   /// Returns false on failure.
   bool RecomputeDirty(Edge* edge, string* err);
 
-  /// Recompute whether any output of the edge is dirty.
-  /// Returns true if so.
-  bool RecomputeOutputsDirty(Edge* edge, Node* most_recent_input);
+  /// Recompute whether any output of the edge is dirty; if so, sets |*dirty|.
+  /// Returns false on failure.
+  bool RecomputeOutputsDirty(Edge* edge, Node* most_recent_input,
+                             bool* dirty, string* err);
 
   BuildLog* build_log() const {
     return build_log_;
diff --git a/src/graph_test.cc b/src/graph_test.cc
index 382d352..e41f5cc 100644
--- a/src/graph_test.cc
+++ b/src/graph_test.cc
@@ -252,6 +252,81 @@
   ASSERT_FALSE(plan_.more_to_do());
 }
 
+// Verify that cycles in graphs with multiple outputs are handled correctly
+// in RecomputeDirty() and don't cause deps to be loaded multiple times.
+TEST_F(GraphTest, CycleWithLengthZeroFromDepfile) {
+  AssertParse(&state_,
+"rule deprule\n"
+"   depfile = dep.d\n"
+"   command = unused\n"
+"build a b: deprule\n"
+  );
+  fs_.Create("dep.d", "a: b\n");
+
+  string err;
+  Edge* edge = GetNode("a")->in_edge();
+  EXPECT_TRUE(scan_.RecomputeDirty(edge, &err));
+  ASSERT_EQ("", err);
+
+  // Despite the depfile causing edge to be a cycle (it has outputs a and b,
+  // but the depfile also adds b as an input), the deps should have been loaded
+  // only once:
+  EXPECT_EQ(1, edge->inputs_.size());
+  EXPECT_EQ("b", edge->inputs_[0]->path());
+}
+
+// Like CycleWithLengthZeroFromDepfile but with a higher cycle length.
+TEST_F(GraphTest, CycleWithLengthOneFromDepfile) {
+  AssertParse(&state_,
+"rule deprule\n"
+"   depfile = dep.d\n"
+"   command = unused\n"
+"rule r\n"
+"   command = unused\n"
+"build a b: deprule\n"
+"build c: r b\n"
+  );
+  fs_.Create("dep.d", "a: c\n");
+
+  string err;
+  Edge* edge = GetNode("a")->in_edge();
+  EXPECT_TRUE(scan_.RecomputeDirty(edge, &err));
+  ASSERT_EQ("", err);
+
+  // Despite the depfile making |edge| part of a cycle (|edge| has outputs a
+  // and b, c's in_edge has b as an input, and the depfile adds c as an input
+  // of |edge|), the deps should have been loaded only once:
+  EXPECT_EQ(1, edge->inputs_.size());
+  EXPECT_EQ("c", edge->inputs_[0]->path());
+}
+
+// Like CycleWithLengthOneFromDepfile but building a node one hop away from
+// the cycle.
+TEST_F(GraphTest, CycleWithLengthOneFromDepfileOneHopAway) {
+  AssertParse(&state_,
+"rule deprule\n"
+"   depfile = dep.d\n"
+"   command = unused\n"
+"rule r\n"
+"   command = unused\n"
+"build a b: deprule\n"
+"build c: r b\n"
+"build d: r a\n"
+  );
+  fs_.Create("dep.d", "a: c\n");
+
+  string err;
+  EXPECT_TRUE(scan_.RecomputeDirty(GetNode("d")->in_edge(), &err));
+  ASSERT_EQ("", err);
+
+  // Despite the depfile making |edge| part of a cycle (|edge| has outputs a
+  // and b, c's in_edge has b as an input, and the depfile adds c as an input
+  // of |edge|), the deps should have been loaded only once:
+  Edge* edge = GetNode("a")->in_edge();
+  EXPECT_EQ(1, edge->inputs_.size());
+  EXPECT_EQ("c", edge->inputs_[0]->path());
+}
+
 #ifdef _WIN32
 TEST_F(GraphTest, Decanonicalize) {
   ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
diff --git a/src/graphviz.cc b/src/graphviz.cc
index 8354a22..dce8b32 100644
--- a/src/graphviz.cc
+++ b/src/graphviz.cc
@@ -15,6 +15,7 @@
 #include "graphviz.h"
 
 #include <stdio.h>
+#include <algorithm>
 
 #include "graph.h"
 
@@ -22,7 +23,9 @@
   if (visited_nodes_.find(node) != visited_nodes_.end())
     return;
 
-  printf("\"%p\" [label=\"%s\"]\n", node, node->path().c_str());
+  string pathstr = node->path();
+  replace(pathstr.begin(), pathstr.end(), '\\', '/');
+  printf("\"%p\" [label=\"%s\"]\n", node, pathstr.c_str());
   visited_nodes_.insert(node);
 
   Edge* edge = node->in_edge();
diff --git a/src/hash_collision_bench.cc b/src/hash_collision_bench.cc
index d0eabde..5be0531 100644
--- a/src/hash_collision_bench.cc
+++ b/src/hash_collision_bench.cc
@@ -46,7 +46,7 @@
 
   sort(hashes, hashes + N);
 
-  int num_collisions = 0;
+  int collision_count = 0;
   for (int i = 1; i < N; ++i) {
     if (hashes[i - 1].first == hashes[i].first) {
       if (strcmp(commands[hashes[i - 1].second],
@@ -54,9 +54,9 @@
         printf("collision!\n  string 1: '%s'\n  string 2: '%s'\n",
                commands[hashes[i - 1].second],
                commands[hashes[i].second]);
-        num_collisions++;
+        collision_count++;
       }
     }
   }
-  printf("\n\n%d collisions after %d runs\n", num_collisions, N);
+  printf("\n\n%d collisions after %d runs\n", collision_count, N);
 }
diff --git a/src/hash_map.h b/src/hash_map.h
index 77e7586..abdba92 100644
--- a/src/hash_map.h
+++ b/src/hash_map.h
@@ -50,7 +50,22 @@
   return h;
 }
 
-#ifdef _MSC_VER
+#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900)
+#include <unordered_map>
+
+namespace std {
+template<>
+struct hash<StringPiece> {
+  typedef StringPiece argument_type;
+  typedef size_t result_type;
+
+  size_t operator()(StringPiece key) const {
+    return MurmurHash2(key.str_, key.len_);
+  }
+};
+}
+
+#elif defined(_MSC_VER)
 #include <hash_map>
 
 using stdext::hash_map;
@@ -73,26 +88,17 @@
 };
 
 #else
-
 #include <ext/hash_map>
 
 using __gnu_cxx::hash_map;
 
 namespace __gnu_cxx {
 template<>
-struct hash<std::string> {
-  size_t operator()(const std::string& s) const {
-    return hash<const char*>()(s.c_str());
-  }
-};
-
-template<>
 struct hash<StringPiece> {
   size_t operator()(StringPiece key) const {
     return MurmurHash2(key.str_, key.len_);
   }
 };
-
 }
 #endif
 
@@ -102,7 +108,9 @@
 /// mapping StringPiece => Foo*.
 template<typename V>
 struct ExternalStringHashMap {
-#ifdef _MSC_VER
+#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900)
+  typedef std::unordered_map<StringPiece, V> Type;
+#elif defined(_MSC_VER)
   typedef hash_map<StringPiece, V, StringPieceCmp> Type;
 #else
   typedef hash_map<StringPiece, V> Type;
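
On a C++11 compiler the maps keyed by StringPiece now resolve to
std::unordered_map, hashed through the std::hash<StringPiece> specialization
above (MurmurHash2 of the underlying bytes). A brief usage sketch
(illustrative only):

    #include "hash_map.h"

    void HashMapSketch() {
      // Under C++11 this is std::unordered_map<StringPiece, int>; older
      // toolchains fall back to the hash_map-based typedefs.
      ExternalStringHashMap<int>::Type counts;
      const char* path = "out/foo.o";  // must outlive the StringPiece key
      counts[StringPiece(path)] = 1;
    }
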
diff --git a/src/line_printer.cc b/src/line_printer.cc
index 813f63e..2cd3e17 100644
--- a/src/line_printer.cc
+++ b/src/line_printer.cc
@@ -50,29 +50,21 @@
     return;
   }
 
-#ifdef _WIN32
-  CONSOLE_SCREEN_BUFFER_INFO csbi;
-  GetConsoleScreenBufferInfo(console_, &csbi);
-#endif
-
   if (smart_terminal_) {
-#ifndef _WIN32
     printf("\r");  // Print over previous line, if any.
-#else
-    csbi.dwCursorPosition.X = 0;
-    SetConsoleCursorPosition(console_, csbi.dwCursorPosition);
-#endif
+    // On Windows, a call to a C library function that writes to stdout also
+    // handles pausing the executable when the "Pause" key or Ctrl-S is pressed.
   }
 
   if (smart_terminal_ && type == ELIDE) {
 #ifdef _WIN32
-    // Don't use the full width or console will move to next line.
-    size_t width = static_cast<size_t>(csbi.dwSize.X) - 1;
-    to_print = ElideMiddle(to_print, width);
-    // We don't want to have the cursor spamming back and forth, so
-    // use WriteConsoleOutput instead which updates the contents of
-    // the buffer, but doesn't move the cursor position.
+    CONSOLE_SCREEN_BUFFER_INFO csbi;
     GetConsoleScreenBufferInfo(console_, &csbi);
+
+    to_print = ElideMiddle(to_print, static_cast<size_t>(csbi.dwSize.X));
+    // We don't want to have the cursor spamming back and forth, so instead of
+    // printf use WriteConsoleOutput which updates the contents of the buffer,
+    // but doesn't move the cursor position.
     COORD buf_size = { csbi.dwSize.X, 1 };
     COORD zero_zero = { 0, 0 };
     SMALL_RECT target = {
@@ -80,16 +72,12 @@
       static_cast<SHORT>(csbi.dwCursorPosition.X + csbi.dwSize.X - 1),
       csbi.dwCursorPosition.Y
     };
-    CHAR_INFO* char_data = new CHAR_INFO[csbi.dwSize.X];
-    memset(char_data, 0, sizeof(CHAR_INFO) * csbi.dwSize.X);
-    for (int i = 0; i < csbi.dwSize.X; ++i) {
-      char_data[i].Char.AsciiChar = ' ';
+    vector<CHAR_INFO> char_data(csbi.dwSize.X);
+    for (size_t i = 0; i < static_cast<size_t>(csbi.dwSize.X); ++i) {
+      char_data[i].Char.AsciiChar = i < to_print.size() ? to_print[i] : ' ';
       char_data[i].Attributes = csbi.wAttributes;
     }
-    for (size_t i = 0; i < to_print.size(); ++i)
-      char_data[i].Char.AsciiChar = to_print[i];
-    WriteConsoleOutput(console_, char_data, buf_size, zero_zero, &target);
-    delete[] char_data;
+    WriteConsoleOutput(console_, &char_data[0], buf_size, zero_zero, &target);
 #else
     // Limit output to width of the terminal if provided so we don't cause
     // line-wrapping.
diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc
index 388b5bc..e8c0436 100644
--- a/src/manifest_parser.cc
+++ b/src/manifest_parser.cc
@@ -24,8 +24,10 @@
 #include "util.h"
 #include "version.h"
 
-ManifestParser::ManifestParser(State* state, FileReader* file_reader)
-  : state_(state), file_reader_(file_reader) {
+ManifestParser::ManifestParser(State* state, FileReader* file_reader,
+                               bool dupe_edge_should_err)
+    : state_(state), file_reader_(file_reader),
+      dupe_edge_should_err_(dupe_edge_should_err), quiet_(false) {
   env_ = &state->bindings_;
 }
 
@@ -156,7 +158,7 @@
   if (!ExpectToken(Lexer::NEWLINE, err))
     return false;
 
-  if (state_->LookupRule(name) != NULL)
+  if (env_->LookupRuleCurrentScope(name) != NULL)
     return lexer_.Error("duplicate rule '" + name + "'", err);
 
   Rule* rule = new Rule(name);  // XXX scoped_ptr
@@ -185,7 +187,7 @@
   if (rule->bindings_["command"].empty())
     return lexer_.Error("expected 'command =' line", err);
 
-  state_->AddRule(rule);
+  env_->AddRule(rule);
   return true;
 }
 
@@ -252,7 +254,7 @@
   if (!lexer_.ReadIdent(&rule_name))
     return lexer_.Error("expected build command name", err);
 
-  const Rule* rule = state_->LookupRule(rule_name);
+  const Rule* rule = env_->LookupRule(rule_name);
   if (!rule)
     return lexer_.Error("unknown build rule '" + rule_name + "'", err);
 
@@ -298,16 +300,16 @@
     return false;
 
   // Bindings on edges are rare, so allocate per-edge envs only when needed.
-  bool hasIdent = lexer_.PeekToken(Lexer::INDENT);
-  BindingEnv* env = hasIdent ? new BindingEnv(env_) : env_;
-  while (hasIdent) {
+  bool has_indent_token = lexer_.PeekToken(Lexer::INDENT);
+  BindingEnv* env = has_indent_token ? new BindingEnv(env_) : env_;
+  while (has_indent_token) {
     string key;
     EvalString val;
     if (!ParseLet(&key, &val, err))
       return false;
 
     env->AddBinding(key, val.Evaluate(env_));
-    hasIdent = lexer_.PeekToken(Lexer::INDENT);
+    has_indent_token = lexer_.PeekToken(Lexer::INDENT);
   }
 
   Edge* edge = state_->AddEdge(rule);
@@ -321,6 +323,35 @@
     edge->pool_ = pool;
   }
 
+  edge->outputs_.reserve(outs.size());
+  for (vector<EvalString>::iterator i = outs.begin(); i != outs.end(); ++i) {
+    string path = i->Evaluate(env);
+    string path_err;
+    unsigned int slash_bits;
+    if (!CanonicalizePath(&path, &slash_bits, &path_err))
+      return lexer_.Error(path_err, err);
+    if (!state_->AddOut(edge, path, slash_bits)) {
+      if (dupe_edge_should_err_) {
+        lexer_.Error("multiple rules generate " + path + " [-w dupbuild=err]",
+                     err);
+        return false;
+      } else if (!quiet_) {
+        Warning("multiple rules generate %s. "
+                "builds involving this target will not be correct; "
+                "continuing anyway [-w dupbuild=warn]",
+                path.c_str());
+      }
+    }
+  }
+  if (edge->outputs_.empty()) {
+    // All outputs of the edge are already created by other edges. Don't add
+    // this edge.  Do this check before input nodes are connected to the edge.
+    state_->edges_.pop_back();
+    delete edge;
+    return true;
+  }
+
+  edge->inputs_.reserve(ins.size());
   for (vector<EvalString>::iterator i = ins.begin(); i != ins.end(); ++i) {
     string path = i->Evaluate(env);
     string path_err;
@@ -329,14 +360,6 @@
       return lexer_.Error(path_err, err);
     state_->AddIn(edge, path, slash_bits);
   }
-  for (vector<EvalString>::iterator i = outs.begin(); i != outs.end(); ++i) {
-    string path = i->Evaluate(env);
-    string path_err;
-    unsigned int slash_bits;
-    if (!CanonicalizePath(&path, &slash_bits, &path_err))
-      return lexer_.Error(path_err, err);
-    state_->AddOut(edge, path, slash_bits);
-  }
   edge->implicit_deps_ = implicit;
   edge->order_only_deps_ = order_only;
 
diff --git a/src/manifest_parser.h b/src/manifest_parser.h
index 5212f72..f72cd6f 100644
--- a/src/manifest_parser.h
+++ b/src/manifest_parser.h
@@ -32,13 +32,15 @@
     virtual bool ReadFile(const string& path, string* content, string* err) = 0;
   };
 
-  ManifestParser(State* state, FileReader* file_reader);
+  ManifestParser(State* state, FileReader* file_reader,
+                 bool dupe_edge_should_err = false);
 
   /// Load and parse a file.
-  bool Load(const string& filename, string* err, Lexer* parent=NULL);
+  bool Load(const string& filename, string* err, Lexer* parent = NULL);
 
   /// Parse a text string of input.  Used by tests.
   bool ParseTest(const string& input, string* err) {
+    quiet_ = true;
     return Parse("input", input, err);
   }
 
@@ -64,6 +66,8 @@
   BindingEnv* env_;
   FileReader* file_reader_;
   Lexer lexer_;
+  bool dupe_edge_should_err_;
+  bool quiet_;
 };
 
 #endif  // NINJA_MANIFEST_PARSER_H_
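
A sketch of how a caller opts into the stricter duplicate-edge handling; the
State and FileReader are assumed to be set up elsewhere, and the function name
is hypothetical (see the ninja.cc hunk further down for the real wiring):

    #include <stdio.h>
    #include <string>

    #include "manifest_parser.h"

    // With dupe_edge_should_err=true, a manifest in which two build statements
    // claim the same output fails to load instead of only printing a warning.
    bool LoadStrict(State* state, ManifestParser::FileReader* reader) {
      ManifestParser parser(state, reader, /*dupe_edge_should_err=*/true);
      std::string err;
      if (!parser.Load("build.ninja", &err)) {
        // e.g. "build.ninja:5: multiple rules generate out1 [-w dupbuild=err]"
        fprintf(stderr, "ninja: %s\n", err.c_str());
        return false;
      }
      return true;
    }
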
diff --git a/src/manifest_parser_perftest.cc b/src/manifest_parser_perftest.cc
index ca62fb2..6b56ab0 100644
--- a/src/manifest_parser_perftest.cc
+++ b/src/manifest_parser_perftest.cc
@@ -42,15 +42,19 @@
   }
 };
 
-bool WriteFakeManifests(const string& dir) {
+bool WriteFakeManifests(const string& dir, string* err) {
   RealDiskInterface disk_interface;
-  if (disk_interface.Stat(dir + "/build.ninja") > 0)
-    return true;
+  TimeStamp mtime = disk_interface.Stat(dir + "/build.ninja", err);
+  if (mtime != 0)  // 0 means that the file doesn't exist yet.
+    return mtime != -1;
 
+  string command = "python misc/write_fake_manifests.py " + dir;
   printf("Creating manifest data..."); fflush(stdout);
-  int err = system(("python misc/write_fake_manifests.py " + dir).c_str());
+  int exit_code = system(command.c_str());
   printf("done.\n");
-  return err == 0;
+  if (exit_code != 0)
+    *err = "Failed to run " + command;
+  return exit_code == 0;
 }
 
 int LoadManifests(bool measure_command_evaluation) {
@@ -93,8 +97,9 @@
 
   const char kManifestDir[] = "build/manifest_perftest";
 
-  if (!WriteFakeManifests(kManifestDir)) {
-    fprintf(stderr, "Failed to write test data\n");
+  string err;
+  if (!WriteFakeManifests(kManifestDir, &err)) {
+    fprintf(stderr, "Failed to write test data: %s\n", err.c_str());
     return 1;
   }
 
diff --git a/src/manifest_parser_test.cc b/src/manifest_parser_test.cc
index 6909ea9..8f7b575 100644
--- a/src/manifest_parser_test.cc
+++ b/src/manifest_parser_test.cc
@@ -28,6 +28,7 @@
     string err;
     EXPECT_TRUE(parser.ParseTest(input, &err));
     ASSERT_EQ("", err);
+    VerifyGraph(state);
   }
 
   virtual bool ReadFile(const string& path, string* content, string* err) {
@@ -60,8 +61,8 @@
 "\n"
 "build result: cat in_1.cc in-2.O\n"));
 
-  ASSERT_EQ(3u, state.rules_.size());
-  const Rule* rule = state.rules_.begin()->second;
+  ASSERT_EQ(3u, state.bindings_.GetRules().size());
+  const Rule* rule = state.bindings_.GetRules().begin()->second;
   EXPECT_EQ("cat", rule->name());
   EXPECT_EQ("[cat ][$in][ > ][$out]",
             rule->GetBinding("command")->Serialize());
@@ -93,8 +94,8 @@
 "build result: cat in_1.cc in-2.O\n"
 "  #comment\n"));
 
-  ASSERT_EQ(2u, state.rules_.size());
-  const Rule* rule = state.rules_.begin()->second;
+  ASSERT_EQ(2u, state.bindings_.GetRules().size());
+  const Rule* rule = state.bindings_.GetRules().begin()->second;
   EXPECT_EQ("cat", rule->name());
   Edge* edge = state.GetNode("result", 0)->in_edge();
   EXPECT_TRUE(edge->GetBindingBool("restat"));
@@ -126,8 +127,8 @@
 "build out: cat_rsp in\n"
 "  rspfile=out.rsp\n"));
 
-  ASSERT_EQ(2u, state.rules_.size());
-  const Rule* rule = state.rules_.begin()->second;
+  ASSERT_EQ(2u, state.bindings_.GetRules().size());
+  const Rule* rule = state.bindings_.GetRules().begin()->second;
   EXPECT_EQ("cat_rsp", rule->name());
   EXPECT_EQ("[cat ][$rspfile][ > ][$out]",
             rule->GetBinding("command")->Serialize());
@@ -143,8 +144,8 @@
 "build out: cat_rsp in in2\n"
 "  rspfile=out.rsp\n"));
 
-  ASSERT_EQ(2u, state.rules_.size());
-  const Rule* rule = state.rules_.begin()->second;
+  ASSERT_EQ(2u, state.bindings_.GetRules().size());
+  const Rule* rule = state.bindings_.GetRules().begin()->second;
   EXPECT_EQ("cat_rsp", rule->name());
   EXPECT_EQ("[cat ][$in_newline][ > ][$out]",
             rule->GetBinding("command")->Serialize());
@@ -204,8 +205,8 @@
 "build a: link c $\n"
 " d e f\n"));
 
-  ASSERT_EQ(2u, state.rules_.size());
-  const Rule* rule = state.rules_.begin()->second;
+  ASSERT_EQ(2u, state.bindings_.GetRules().size());
+  const Rule* rule = state.bindings_.GetRules().begin()->second;
   EXPECT_EQ("link", rule->name());
   EXPECT_EQ("[foo bar baz]", rule->GetBinding("command")->Serialize());
 }
@@ -340,6 +341,42 @@
 }
 #endif
 
+TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputs) {
+  ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+"  command = cat $in > $out\n"
+"build out1 out2: cat in1\n"
+"build out1: cat in2\n"
+"build final: cat out1\n"
+));
+  // AssertParse() checks that the generated build graph is self-consistent.
+  // That's all the checking that this test needs.
+}
+
+TEST_F(ParserTest, NoDeadPointerFromDuplicateEdge) {
+  ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n"
+"  command = cat $in > $out\n"
+"build out: cat in\n"
+"build out: cat in\n"
+));
+  // AssertParse() checks that the generated build graph is self-consistent.
+  // That's all the checking that this test needs.
+}
+
+TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputsError) {
+  const char kInput[] =
+"rule cat\n"
+"  command = cat $in > $out\n"
+"build out1 out2: cat in1\n"
+"build out1: cat in2\n"
+"build final: cat out1\n";
+  ManifestParser parser(&state, this, /*dupe_edge_should_err=*/true);
+  string err;
+  EXPECT_FALSE(parser.ParseTest(kInput, &err));
+  EXPECT_EQ("input:5: multiple rules generate out1 [-w dupbuild=err]\n", err);
+}
+
 TEST_F(ParserTest, ReservedWords) {
   ASSERT_NO_FATAL_FAILURE(AssertParse(
 "rule build\n"
@@ -823,18 +860,27 @@
 }
 
 TEST_F(ParserTest, DuplicateRuleInDifferentSubninjas) {
-  // Test that rules live in a global namespace and aren't scoped to subninjas.
+  // Test that rules are scoped to subninjas.
   files_["test.ninja"] = "rule cat\n"
                          "  command = cat\n";
   ManifestParser parser(&state, this);
   string err;
-  EXPECT_FALSE(parser.ParseTest("rule cat\n"
+  EXPECT_TRUE(parser.ParseTest("rule cat\n"
                                 "  command = cat\n"
                                 "subninja test.ninja\n", &err));
-  EXPECT_EQ("test.ninja:1: duplicate rule 'cat'\n"
-            "rule cat\n"
-            "        ^ near here"
-            , err);
+}
+
+TEST_F(ParserTest, DuplicateRuleInDifferentSubninjasWithInclude) {
+  // Test that rules are scoped to subninjas even with includes.
+  files_["rules.ninja"] = "rule cat\n"
+                         "  command = cat\n";
+  files_["test.ninja"] = "include rules.ninja\n"
+                         "build x : cat\n";
+  ManifestParser parser(&state, this);
+  string err;
+  EXPECT_TRUE(parser.ParseTest("include rules.ninja\n"
+                                "subninja test.ninja\n"
+                                "build y : cat\n", &err));
 }
 
 TEST_F(ParserTest, Include) {
@@ -891,6 +937,16 @@
   EXPECT_EQ("", err);
 }
 
+TEST_F(ParserTest, DefaultDefaultCycle) {
+  ASSERT_NO_FATAL_FAILURE(AssertParse(
+"rule cat\n  command = cat $in > $out\n"
+"build a: cat a\n"));
+
+  string err;
+  EXPECT_EQ(0u, state.DefaultNodes(&err).size());
+  EXPECT_EQ("could not determine root nodes of build graph", err);
+}
+
 TEST_F(ParserTest, DefaultStatements) {
   ASSERT_NO_FATAL_FAILURE(AssertParse(
 "rule cat\n  command = cat $in > $out\n"
diff --git a/src/ninja.cc b/src/ninja.cc
index 2c890c2..e5bf98d 100644
--- a/src/ninja.cc
+++ b/src/ninja.cc
@@ -64,6 +64,9 @@
 
   /// Tool to run rather than building.
   const Tool* tool;
+
+  /// Whether duplicate rules for one target should warn or print an error.
+  bool dupe_edges_should_err;
 };
 
 /// The Ninja main() loads up a series of data structures; various tools need
@@ -140,6 +143,8 @@
 
   virtual bool IsPathDead(StringPiece s) const {
     Node* n = state_.LookupNode(s);
+    if (n && n->in_edge())
+      return false;
     // Just checking n isn't enough: If an old output is both in the build log
     // and in the deps log, it will have a Node object in state_.  (It will also
     // have an in edge if one of its inputs is another output that's in the deps
@@ -149,7 +154,11 @@
     // which seems good enough for this corner case.)
     // Do keep entries around for files which still exist on disk, for
     // generators that want to use this information.
-    return (!n || !n->in_edge()) && disk_interface_.Stat(s.AsString()) == 0;
+    string err;
+    TimeStamp mtime = disk_interface_.Stat(s.AsString(), &err);
+    if (mtime == -1)
+      Error("%s", err.c_str());  // Log and ignore Stat() errors.
+    return mtime == 0;
   }
 };
 
@@ -192,14 +201,15 @@
 "  -f FILE  specify input build file [default=build.ninja]\n"
 "\n"
 "  -j N     run N jobs in parallel [default=%d, derived from CPUs available]\n"
-"  -l N     do not start new jobs if the load average is greater than N\n"
 "  -k N     keep going until N jobs fail [default=1]\n"
+"  -l N     do not start new jobs if the load average is greater than N\n"
 "  -n       dry run (don't run commands but act like they succeeded)\n"
 "  -v       show all command lines while building\n"
 "\n"
 "  -d MODE  enable debugging (use -d list to list modes)\n"
 "  -t TOOL  run a subtool (use -t list to list subtools)\n"
-"    terminates toplevel options; further flags are passed to the tool\n",
+"    terminates toplevel options; further flags are passed to the tool\n"
+"  -w FLAG  adjust warnings (use -w list to list warnings)\n",
           kNinjaVersion, config.parallelism);
 }
 
@@ -241,12 +251,11 @@
 
   if (builder.AlreadyUpToDate())
     return false;  // Not an error, but we didn't rebuild.
-  if (!builder.Build(err))
-    return false;
 
-  // The manifest was only rebuilt if it is now dirty (it may have been cleaned
-  // by a restat).
-  return node->dirty();
+  // Even if the manifest was cleaned by a restat rule, claim that it was
+  // rebuilt.  Not doing so can lead to crashes, see
+  // https://github.com/martine/ninja/issues/874
+  return builder.Build(err);
 }
 
 Node* NinjaMain::CollectTarget(const char* cpath, string* err) {
@@ -478,7 +487,10 @@
       continue;
     }
 
-    TimeStamp mtime = disk_interface.Stat((*it)->path());
+    string err;
+    TimeStamp mtime = disk_interface.Stat((*it)->path(), &err);
+    if (mtime == -1)
+      Error("%s", err.c_str());  // Log and ignore Stat() errors;
     printf("%s: #deps %d, deps mtime %d (%s)\n",
            (*it)->path().c_str(), deps->node_count, deps->mtime,
            (!mtime || mtime > deps->mtime ? "STALE":"VALID"));
@@ -793,6 +805,32 @@
   }
 }
 
+/// Set a warning flag.  Returns false if Ninja should exit instead of
+/// continuing.
+bool WarningEnable(const string& name, Options* options) {
+  if (name == "list") {
+    printf("warning flags:\n"
+"  dupbuild={err,warn}  multiple build lines for one target\n");
+    return false;
+  } else if (name == "dupbuild=err") {
+    options->dupe_edges_should_err = true;
+    return true;
+  } else if (name == "dupbuild=warn") {
+    options->dupe_edges_should_err = false;
+    return true;
+  } else {
+    const char* suggestion =
+        SpellcheckString(name.c_str(), "dupbuild=err", "dupbuild=warn", NULL);
+    if (suggestion) {
+      Error("unknown warning flag '%s', did you mean '%s'?",
+            name.c_str(), suggestion);
+    } else {
+      Error("unknown warning flag '%s'", name.c_str());
+    }
+    return false;
+  }
+}
+
 bool NinjaMain::OpenBuildLog(bool recompact_only) {
   string log_path = ".ninja_log";
   if (!build_dir_.empty())
@@ -963,7 +1001,7 @@
 
   int opt;
   while (!options->tool &&
-         (opt = getopt_long(*argc, *argv, "d:f:j:k:l:nt:vC:h", kLongOptions,
+         (opt = getopt_long(*argc, *argv, "d:f:j:k:l:nt:vw:C:h", kLongOptions,
                             NULL)) != -1) {
     switch (opt) {
       case 'd':
@@ -1012,6 +1050,10 @@
       case 'v':
         config->verbosity = BuildConfig::VERBOSE;
         break;
+      case 'w':
+        if (!WarningEnable(optarg, options))
+          return 1;
+        break;
       case 'C':
         options->working_dir = optarg;
         break;
@@ -1062,13 +1104,14 @@
     return (ninja.*options.tool->func)(argc, argv);
   }
 
-  // The build can take up to 2 passes: one to rebuild the manifest, then
-  // another to build the desired target.
-  for (int cycle = 0; cycle < 2; ++cycle) {
+  // Limit number of rebuilds, to prevent infinite loops.
+  const int kCycleLimit = 100;
+  for (int cycle = 1; cycle <= kCycleLimit; ++cycle) {
     NinjaMain ninja(ninja_command, config);
 
     RealFileReader file_reader;
-    ManifestParser parser(&ninja.state_, &file_reader);
+    ManifestParser parser(&ninja.state_, &file_reader,
+                          options.dupe_edges_should_err);
     string err;
     if (!parser.Load(options.input_file, &err)) {
       Error("%s", err.c_str());
@@ -1087,16 +1130,13 @@
     if (options.tool && options.tool->when == Tool::RUN_AFTER_LOGS)
       return (ninja.*options.tool->func)(argc, argv);
 
-    // The first time through, attempt to rebuild the manifest before
-    // building anything else.
-    if (cycle == 0) {
-      if (ninja.RebuildManifest(options.input_file, &err)) {
-        // Start the build over with the new manifest.
-        continue;
-      } else if (!err.empty()) {
-        Error("rebuilding '%s': %s", options.input_file, err.c_str());
-        return 1;
-      }
+    // Attempt to rebuild the manifest before building anything else
+    if (ninja.RebuildManifest(options.input_file, &err)) {
+      // Start the build over with the new manifest.
+      continue;
+    } else if (!err.empty()) {
+      Error("rebuilding '%s': %s", options.input_file, err.c_str());
+      return 1;
     }
 
     int result = ninja.RunBuild(argc, argv);
@@ -1105,7 +1145,9 @@
     return result;
   }
 
-  return 1;  // Shouldn't be reached.
+  Error("manifest '%s' still dirty after %d tries\n",
+      options.input_file, kCycleLimit);
+  return 1;
 }
 
 }  // anonymous namespace
diff --git a/src/state.cc b/src/state.cc
index 6e3e10d..a70f211 100644
--- a/src/state.cc
+++ b/src/state.cc
@@ -61,6 +61,7 @@
   }
 }
 
+// static
 bool Pool::WeightedEdgeCmp(const Edge* a, const Edge* b) {
   if (!a) return b;
   if (!b) return false;
@@ -73,23 +74,11 @@
 const Rule State::kPhonyRule("phony");
 
 State::State() {
-  AddRule(&kPhonyRule);
+  bindings_.AddRule(&kPhonyRule);
   AddPool(&kDefaultPool);
   AddPool(&kConsolePool);
 }
 
-void State::AddRule(const Rule* rule) {
-  assert(LookupRule(rule->name()) == NULL);
-  rules_[rule->name()] = rule;
-}
-
-const Rule* State::LookupRule(const string& rule_name) {
-  map<string, const Rule*>::iterator i = rules_.find(rule_name);
-  if (i == rules_.end())
-    return NULL;
-  return i->second;
-}
-
 void State::AddPool(Pool* pool) {
   assert(LookupPool(pool->name()) == NULL);
   pools_[pool->name()] = pool;
@@ -151,16 +140,13 @@
   node->AddOutEdge(edge);
 }
 
-void State::AddOut(Edge* edge, StringPiece path, unsigned int slash_bits) {
+bool State::AddOut(Edge* edge, StringPiece path, unsigned int slash_bits) {
   Node* node = GetNode(path, slash_bits);
+  if (node->in_edge())
+    return false;
   edge->outputs_.push_back(node);
-  if (node->in_edge()) {
-    Warning("multiple rules generate %s. "
-            "builds involving this target will not be correct; "
-            "continuing anyway",
-            path.AsString().c_str());
-  }
   node->set_in_edge(edge);
+  return true;
 }
 
 bool State::AddDefault(StringPiece path, string* err) {
@@ -187,7 +173,6 @@
   if (!edges_.empty() && root_nodes.empty())
     *err = "could not determine root nodes of build graph";
 
-  assert(edges_.empty() || !root_nodes.empty());
   return root_nodes;
 }
 
diff --git a/src/state.h b/src/state.h
index 8804532..d7987ba 100644
--- a/src/state.h
+++ b/src/state.h
@@ -37,13 +37,14 @@
 /// the total scheduled weight diminishes enough (i.e. when a scheduled edge
 /// completes).
 struct Pool {
-  explicit Pool(const string& name, int depth)
-    : name_(name), current_use_(0), depth_(depth), delayed_(&WeightedEdgeCmp) { }
+  Pool(const string& name, int depth)
+    : name_(name), current_use_(0), depth_(depth), delayed_(&WeightedEdgeCmp) {}
 
   // A depth of 0 is infinite
   bool is_valid() const { return depth_ >= 0; }
   int depth() const { return depth_; }
   const string& name() const { return name_; }
+  int current_use() const { return current_use_; }
 
   /// true if the Pool might delay this edge
   bool ShouldDelayEdge() const { return depth_ != 0; }
@@ -79,7 +80,7 @@
   DelayedEdges delayed_;
 };
 
-/// Global state (file status, loaded rules) for a single run.
+/// Global state (file status) for a single run.
 struct State {
   static Pool kDefaultPool;
   static Pool kConsolePool;
@@ -87,9 +88,6 @@
 
   State();
 
-  void AddRule(const Rule* rule);
-  const Rule* LookupRule(const string& rule_name);
-
   void AddPool(Pool* pool);
   Pool* LookupPool(const string& pool_name);
 
@@ -100,7 +98,7 @@
   Node* SpellcheckNode(const string& path);
 
   void AddIn(Edge* edge, StringPiece path, unsigned int slash_bits);
-  void AddOut(Edge* edge, StringPiece path, unsigned int slash_bits);
+  bool AddOut(Edge* edge, StringPiece path, unsigned int slash_bits);
   bool AddDefault(StringPiece path, string* error);
 
   /// Reset state.  Keeps all nodes and edges, but restores them to the
@@ -119,9 +117,6 @@
   typedef ExternalStringHashMap<Node*>::Type Paths;
   Paths paths_;
 
-  /// All the rules used in the graph.
-  map<string, const Rule*> rules_;
-
   /// All the pools used in the graph.
   map<string, Pool*> pools_;
 
diff --git a/src/state_test.cc b/src/state_test.cc
index bd133b6..458b519 100644
--- a/src/state_test.cc
+++ b/src/state_test.cc
@@ -29,7 +29,7 @@
 
   Rule* rule = new Rule("cat");
   rule->AddBinding("command", command);
-  state.AddRule(rule);
+  state.bindings_.AddRule(rule);
 
   Edge* edge = state.AddEdge(rule);
   state.AddIn(edge, "in1", 0);
diff --git a/src/subprocess-posix.cc b/src/subprocess-posix.cc
index 743e406..f3baec2 100644
--- a/src/subprocess-posix.cc
+++ b/src/subprocess-posix.cc
@@ -28,6 +28,7 @@
 Subprocess::Subprocess(bool use_console) : fd_(-1), pid_(-1),
                                            use_console_(use_console) {
 }
+
 Subprocess::~Subprocess() {
   if (fd_ >= 0)
     close(fd_);
@@ -59,14 +60,19 @@
     // Track which fd we use to report errors on.
     int error_pipe = output_pipe[1];
     do {
-      if (sigaction(SIGINT, &set->old_act_, 0) < 0)
+      if (sigaction(SIGINT, &set->old_int_act_, 0) < 0)
+        break;
+      if (sigaction(SIGTERM, &set->old_term_act_, 0) < 0)
         break;
       if (sigprocmask(SIG_SETMASK, &set->old_mask_, 0) < 0)
         break;
 
       if (!use_console_) {
-        // Put the child in its own process group, so ctrl-c won't reach it.
-        if (setpgid(0, 0) < 0)
+        // Put the child in its own session and process group. It will be
+        // detached from the current terminal and ctrl-c won't reach it.
+        // Since this process was just forked, it is not a process group leader
+        // and setsid() will succeed.
+        if (setsid() < 0)
           break;
 
         // Open /dev/null over stdin.
@@ -130,7 +136,7 @@
     if (exit == 0)
       return ExitSuccess;
   } else if (WIFSIGNALED(status)) {
-    if (WTERMSIG(status) == SIGINT)
+    if (WTERMSIG(status) == SIGINT || WTERMSIG(status) == SIGTERM)
       return ExitInterrupted;
   }
   return ExitFailure;
@@ -144,31 +150,48 @@
   return buf_;
 }
 
-bool SubprocessSet::interrupted_;
+int SubprocessSet::interrupted_;
 
 void SubprocessSet::SetInterruptedFlag(int signum) {
-  (void) signum;
-  interrupted_ = true;
+  interrupted_ = signum;
+}
+
+void SubprocessSet::HandlePendingInterruption() {
+  sigset_t pending;
+  sigemptyset(&pending);
+  if (sigpending(&pending) == -1) {
+    perror("ninja: sigpending");
+    return;
+  }
+  if (sigismember(&pending, SIGINT))
+    interrupted_ = SIGINT;
+  else if (sigismember(&pending, SIGTERM))
+    interrupted_ = SIGTERM;
 }
 
 SubprocessSet::SubprocessSet() {
   sigset_t set;
   sigemptyset(&set);
   sigaddset(&set, SIGINT);
+  sigaddset(&set, SIGTERM);
   if (sigprocmask(SIG_BLOCK, &set, &old_mask_) < 0)
     Fatal("sigprocmask: %s", strerror(errno));
 
   struct sigaction act;
   memset(&act, 0, sizeof(act));
   act.sa_handler = SetInterruptedFlag;
-  if (sigaction(SIGINT, &act, &old_act_) < 0)
+  if (sigaction(SIGINT, &act, &old_int_act_) < 0)
+    Fatal("sigaction: %s", strerror(errno));
+  if (sigaction(SIGTERM, &act, &old_term_act_) < 0)
     Fatal("sigaction: %s", strerror(errno));
 }
 
 SubprocessSet::~SubprocessSet() {
   Clear();
 
-  if (sigaction(SIGINT, &old_act_, 0) < 0)
+  if (sigaction(SIGINT, &old_int_act_, 0) < 0)
+    Fatal("sigaction: %s", strerror(errno));
+  if (sigaction(SIGTERM, &old_term_act_, 0) < 0)
     Fatal("sigaction: %s", strerror(errno));
   if (sigprocmask(SIG_SETMASK, &old_mask_, 0) < 0)
     Fatal("sigprocmask: %s", strerror(errno));
@@ -199,16 +222,20 @@
     ++nfds;
   }
 
-  interrupted_ = false;
+  interrupted_ = 0;
   int ret = ppoll(&fds.front(), nfds, NULL, &old_mask_);
   if (ret == -1) {
     if (errno != EINTR) {
       perror("ninja: ppoll");
       return false;
     }
-    return interrupted_;
+    return IsInterrupted();
   }
 
+  HandlePendingInterruption();
+  if (IsInterrupted())
+    return true;
+
   nfds_t cur_nfd = 0;
   for (vector<Subprocess*>::iterator i = running_.begin();
        i != running_.end(); ) {
@@ -227,7 +254,7 @@
     ++i;
   }
 
-  return interrupted_;
+  return IsInterrupted();
 }
 
 #else  // !defined(USE_PPOLL)
@@ -246,16 +273,20 @@
     }
   }
 
-  interrupted_ = false;
+  interrupted_ = 0;
   int ret = pselect(nfds, &set, 0, 0, 0, &old_mask_);
   if (ret == -1) {
     if (errno != EINTR) {
       perror("ninja: pselect");
       return false;
     }
-    return interrupted_;
+    return IsInterrupted();
   }
 
+  HandlePendingInterruption();
+  if (IsInterrupted())
+    return true;
+
   for (vector<Subprocess*>::iterator i = running_.begin();
        i != running_.end(); ) {
     int fd = (*i)->fd_;
@@ -270,7 +301,7 @@
     ++i;
   }
 
-  return interrupted_;
+  return IsInterrupted();
 }
 #endif  // !defined(USE_PPOLL)
 
@@ -285,10 +316,10 @@
 void SubprocessSet::Clear() {
   for (vector<Subprocess*>::iterator i = running_.begin();
        i != running_.end(); ++i)
-    // Since the foreground process is in our process group, it will receive a
-    // SIGINT at the same time as us.
+    // Since the foreground process is in our process group, it will receive
+    // the interruption signal (i.e. SIGINT or SIGTERM) at the same time as us.
     if (!(*i)->use_console_)
-      kill(-(*i)->pid_, SIGINT);
+      kill(-(*i)->pid_, interrupted_);
   for (vector<Subprocess*>::iterator i = running_.begin();
        i != running_.end(); ++i)
     delete *i;
diff --git a/src/subprocess.h b/src/subprocess.h
index b7a1a4c..a001fc9 100644
--- a/src/subprocess.h
+++ b/src/subprocess.h
@@ -89,9 +89,15 @@
   static HANDLE ioport_;
 #else
   static void SetInterruptedFlag(int signum);
-  static bool interrupted_;
+  static void HandlePendingInterruption();
+  /// Store the signal number that caused the interruption.
+  /// 0 if no interruption has been requested.
+  static int interrupted_;
 
-  struct sigaction old_act_;
+  static bool IsInterrupted() { return interrupted_ != 0; }
+
+  struct sigaction old_int_act_;
+  struct sigaction old_term_act_;
   sigset_t old_mask_;
 #endif
 };
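
Storing the signal number (rather than a bool) is what lets
SubprocessSet::Clear() forward whichever of SIGINT or SIGTERM actually arrived.
A stripped-down sketch of that pattern, independent of the Subprocess classes
(all names here are illustrative):

    #include <signal.h>
    #include <string.h>
    #include <sys/types.h>

    static volatile sig_atomic_t g_interrupted = 0;  // 0 = no interruption

    static void RecordSignal(int signum) {
      g_interrupted = signum;  // remember *which* signal arrived
    }

    void ForwardInterruptionSketch(pid_t child_pgid) {
      struct sigaction act;
      memset(&act, 0, sizeof(act));
      act.sa_handler = RecordSignal;
      sigaction(SIGINT, &act, NULL);
      sigaction(SIGTERM, &act, NULL);

      // ... run and wait for child processes ...

      if (g_interrupted)
        // Pass the same signal on to the children's process group, mirroring
        // what SubprocessSet::Clear() does above.
        kill(-child_pgid, g_interrupted);
    }
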
diff --git a/src/subprocess_test.cc b/src/subprocess_test.cc
index 8a0787c..07cc52f 100644
--- a/src/subprocess_test.cc
+++ b/src/subprocess_test.cc
@@ -16,6 +16,8 @@
 
 #include "test.h"
 
+#include <string>
+
 #ifndef _WIN32
 // SetWithLots need setrlimit.
 #include <stdio.h>
@@ -96,12 +98,47 @@
   ASSERT_FALSE("We should have been interrupted");
 }
 
+TEST_F(SubprocessTest, InterruptChildWithSigTerm) {
+  Subprocess* subproc = subprocs_.Add("kill -TERM $$");
+  ASSERT_NE((Subprocess *) 0, subproc);
+
+  while (!subproc->Done()) {
+    subprocs_.DoWork();
+  }
+
+  EXPECT_EQ(ExitInterrupted, subproc->Finish());
+}
+
+TEST_F(SubprocessTest, InterruptParentWithSigTerm) {
+  Subprocess* subproc = subprocs_.Add("kill -TERM $PPID ; sleep 1");
+  ASSERT_NE((Subprocess *) 0, subproc);
+
+  while (!subproc->Done()) {
+    bool interrupted = subprocs_.DoWork();
+    if (interrupted)
+      return;
+  }
+
+  ASSERT_FALSE("We should have been interrupted");
+}
+
+// A shell command to check if the current process is connected to a terminal.
+// This is different from having stdin/stdout/stderr be a terminal. (For
+// instance, consider the command "yes < /dev/null > /dev/null 2>&1".
+// As "ps" will confirm, "yes" could still be connected to a terminal, despite
+// not having any of the standard file descriptors be a terminal.)
+static const char kIsConnectedToTerminal[] = "tty < /dev/tty > /dev/null";
+
 TEST_F(SubprocessTest, Console) {
   // Skip test if we don't have the console ourselves.
   if (isatty(0) && isatty(1) && isatty(2)) {
-    Subprocess* subproc = subprocs_.Add("test -t 0 -a -t 1 -a -t 2",
-                                        /*use_console=*/true);
-    ASSERT_NE((Subprocess *) 0, subproc);
+    // Test that stdin, stdout and stderr are a terminal.
+    // Also check that the current process is connected to a terminal.
+    Subprocess* subproc =
+        subprocs_.Add(std::string("test -t 0 -a -t 1 -a -t 2 && ") +
+                          std::string(kIsConnectedToTerminal),
+                      /*use_console=*/true);
+    ASSERT_NE((Subprocess*)0, subproc);
 
     while (!subproc->Done()) {
       subprocs_.DoWork();
@@ -111,6 +148,18 @@
   }
 }
 
+TEST_F(SubprocessTest, NoConsole) {
+  Subprocess* subproc =
+      subprocs_.Add(kIsConnectedToTerminal, /*use_console=*/false);
+  ASSERT_NE((Subprocess*)0, subproc);
+
+  while (!subproc->Done()) {
+    subprocs_.DoWork();
+  }
+
+  EXPECT_NE(ExitSuccess, subproc->Finish());
+}
+
 #endif
 
 TEST_F(SubprocessTest, SetWithSingle) {
@@ -177,8 +226,8 @@
   // Make sure [ulimit -n] isn't going to stop us from working.
   rlimit rlim;
   ASSERT_EQ(0, getrlimit(RLIMIT_NOFILE, &rlim));
-  if (!EXPECT_GT(rlim.rlim_cur, kNumProcs)) {
-    printf("Raise [ulimit -n] well above %u to make this test go\n", kNumProcs);
+  if (rlim.rlim_cur < kNumProcs) {
+    printf("Raise [ulimit -n] well above %u (currently %lu) to make this test go\n", kNumProcs, rlim.rlim_cur);
     return;
   }
 
@@ -196,7 +245,7 @@
   }
   ASSERT_EQ(kNumProcs, subprocs_.finished_.size());
 }
-#endif  // !__APPLE__ && !_WIN32 
+#endif  // !__APPLE__ && !_WIN32
 
 // TODO: this test could work on Windows, just not sure how to simply
 // read stdin.
diff --git a/src/test.cc b/src/test.cc
index f667fef..aed8db7 100644
--- a/src/test.cc
+++ b/src/test.cc
@@ -28,6 +28,7 @@
 #endif
 
 #include "build_log.h"
+#include "graph.h"
 #include "manifest_parser.h"
 #include "util.h"
 
@@ -98,12 +99,45 @@
   string err;
   EXPECT_TRUE(parser.ParseTest(input, &err));
   ASSERT_EQ("", err);
+  VerifyGraph(*state);
 }
 
 void AssertHash(const char* expected, uint64_t actual) {
   ASSERT_EQ(BuildLog::LogEntry::HashCommand(expected), actual);
 }
 
+void VerifyGraph(const State& state) {
+  for (vector<Edge*>::const_iterator e = state.edges_.begin();
+       e != state.edges_.end(); ++e) {
+    // All edges need at least one output.
+    EXPECT_FALSE((*e)->outputs_.empty());
+    // Check that the edge's inputs have the edge as out-edge.
+    for (vector<Node*>::const_iterator in_node = (*e)->inputs_.begin();
+         in_node != (*e)->inputs_.end(); ++in_node) {
+      const vector<Edge*>& out_edges = (*in_node)->out_edges();
+      EXPECT_NE(std::find(out_edges.begin(), out_edges.end(), *e),
+                out_edges.end());
+    }
+    // Check that the edge's outputs have the edge as in-edge.
+    for (vector<Node*>::const_iterator out_node = (*e)->outputs_.begin();
+         out_node != (*e)->outputs_.end(); ++out_node) {
+      EXPECT_EQ((*out_node)->in_edge(), *e);
+    }
+  }
+
+  // The union of all in- and out-edges of each node should be exactly edges_.
+  set<const Edge*> node_edge_set;
+  for (State::Paths::const_iterator p = state.paths_.begin();
+       p != state.paths_.end(); ++p) {
+    const Node* n = p->second;
+    if (n->in_edge())
+      node_edge_set.insert(n->in_edge());
+    node_edge_set.insert(n->out_edges().begin(), n->out_edges().end());
+  }
+  set<const Edge*> edge_set(state.edges_.begin(), state.edges_.end());
+  EXPECT_EQ(node_edge_set, edge_set);
+}
+
 void VirtualFileSystem::Create(const string& path,
                                const string& contents) {
   files_[path].mtime = now_;
@@ -111,10 +145,12 @@
   files_created_.insert(path);
 }
 
-TimeStamp VirtualFileSystem::Stat(const string& path) const {
+TimeStamp VirtualFileSystem::Stat(const string& path, string* err) const {
   FileMap::const_iterator i = files_.find(path);
-  if (i != files_.end())
+  if (i != files_.end()) {
+    *err = i->second.stat_error;
     return i->second.mtime;
+  }
   return 0;
 }
 
diff --git a/src/test.h b/src/test.h
index 4c15203..156e68a 100644
--- a/src/test.h
+++ b/src/test.h
@@ -124,6 +124,7 @@
 
 void AssertParse(State* state, const char* input);
 void AssertHash(const char* expected, uint64_t actual);
+void VerifyGraph(const State& state);
 
 /// An implementation of DiskInterface that uses an in-memory representation
 /// of disk state.  It also logs file accesses and directory creations
@@ -141,7 +142,7 @@
   }
 
   // DiskInterface
-  virtual TimeStamp Stat(const string& path) const;
+  virtual TimeStamp Stat(const string& path, string* err) const;
   virtual bool WriteFile(const string& path, const string& contents);
   virtual bool MakeDir(const string& path);
   virtual string ReadFile(const string& path, string* err);
@@ -150,6 +151,7 @@
   /// An entry for a single in-memory file.
   struct Entry {
     int mtime;
+    string stat_error;  // The error to report when mtime is -1.
     string contents;
   };
 
diff --git a/src/util.cc b/src/util.cc
index 746d7ed..aa47f2f 100644
--- a/src/util.cc
+++ b/src/util.cc
@@ -14,7 +14,10 @@
 
 #include "util.h"
 
-#ifdef _WIN32
+#ifdef __CYGWIN__
+#include <windows.h>
+#include <io.h>
+#elif defined(_WIN32)
 #include <windows.h>
 #include <io.h>
 #include <share.h>
@@ -497,7 +500,7 @@
 int GetProcessorCount() {
 #ifdef _WIN32
   SYSTEM_INFO info;
-  GetSystemInfo(&info);
+  GetNativeSystemInfo(&info);
   return info.dwNumberOfProcessors;
 #else
   return sysconf(_SC_NPROCESSORS_ONLN);
diff --git a/src/version.cc b/src/version.cc
index 2d2d9c0..14d43f0 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -18,7 +18,7 @@
 
 #include "util.h"
 
-const char* kNinjaVersion = "1.5.3";
+const char* kNinjaVersion = "1.6.0";
 
 void ParseVersion(const string& version, int* major, int* minor) {
   size_t end = version.find('.');