v1.12.0
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..6fddca0
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,7 @@
+version: 2
+updates:
+  # Maintain dependencies for GitHub Actions
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml
index 3c93e00..fdebf0b 100644
--- a/.github/workflows/linux.yml
+++ b/.github/workflows/linux.yml
@@ -58,14 +58,14 @@
 
     # Upload ninja binary archive as an artifact
     - name: Upload artifact
-      uses: actions/upload-artifact@v1
+      uses: actions/upload-artifact@v3
       with:
         name: ninja-binary-archives
         path: artifact
 
     - name: Upload release asset
       if: github.event.action == 'published'
-      uses: actions/upload-release-asset@v1.0.1
+      uses: actions/upload-release-asset@v1
       env:
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       with:
@@ -83,7 +83,7 @@
     - name: Install dependencies
       run: |
         apt update
-        apt install -y python3-pytest ninja-build clang-tidy python3-pip clang
+        apt install -y python3-pytest ninja-build clang-tidy python3-pip clang libgtest-dev
         pip3 install cmake==3.17.*
     - name: Configure (GCC)
       run: cmake -Bbuild-gcc -DCMAKE_BUILD_TYPE=Debug -G'Ninja Multi-Config'
@@ -133,7 +133,7 @@
       image: ${{ matrix.image }}
     strategy:
       matrix:
-        image: ['ubuntu:14.04', 'ubuntu:16.04', 'ubuntu:18.04']
+        image: ['ubuntu:20.04', 'ubuntu:22.04', 'ubuntu:24.04']
     steps:
     - uses: actions/checkout@v2
     - name: Install dependencies
@@ -144,6 +144,65 @@
       run: |
         python3 configure.py --bootstrap
         ./ninja all
-        ./ninja_test --gtest_filter=-SubprocessTest.SetWithLots
         python3 misc/ninja_syntax_test.py
         ./misc/output_test.py
+
+  build-aarch64:
+    name: Build Linux ARM64
+    runs-on: [ubuntu-latest]
+    steps:
+    - uses: actions/checkout@v3
+
+    - name: Build
+      uses: uraimo/run-on-arch-action@v2
+      with:
+        arch: aarch64
+        distro: ubuntu18.04
+        githubToken: ${{ github.token }}
+        dockerRunArgs: |
+          --volume "${PWD}:/ninja"
+        install: |
+          apt-get update -q -y
+          apt-get install -q -y make gcc g++ libasan5 clang-tools curl p7zip-full file
+        run: |
+          set -x
+          cd /ninja
+
+          # INSTALL CMAKE
+          CMAKE_VERSION=3.23.4
+          curl -L -O https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-aarch64.sh
+          chmod +x cmake-${CMAKE_VERSION}-linux-aarch64.sh
+          ./cmake-${CMAKE_VERSION}-linux-aarch64.sh --skip-license --prefix=/usr/local
+
+          # BUILD
+          cmake -DCMAKE_BUILD_TYPE=Release -B release-build
+          cmake --build release-build --parallel --config Release
+          strip release-build/ninja
+          file release-build/ninja
+
+          # TEST
+          pushd release-build
+          ./ninja_test
+          popd
+
+          # CREATE ARCHIVE
+          mkdir artifact
+          7z a artifact/ninja-linux-aarch64.zip ./release-build/ninja
+
+    # Upload ninja binary archive as an artifact
+    - name: Upload artifact
+      uses: actions/upload-artifact@v3
+      with:
+        name: ninja-binary-archives
+        path: artifact
+
+    - name: Upload release asset
+      if: github.event.action == 'published'
+      uses: actions/upload-release-asset@v1
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      with:
+        upload_url: ${{ github.event.release.upload_url }}
+        asset_path: ./artifact/ninja-linux-aarch64.zip
+        asset_name: ninja-linux-aarch64.zip
+        asset_content_type: application/zip
diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml
index 0797433..d3dd9ee 100644
--- a/.github/workflows/macos.yml
+++ b/.github/workflows/macos.yml
@@ -8,7 +8,7 @@
 
 jobs:
   build:
-    runs-on: macos-11.0
+    runs-on: macos-12
 
     steps:
     - uses: actions/checkout@v2
@@ -19,7 +19,7 @@
     - name: Build ninja
       shell: bash
       env:
-        MACOSX_DEPLOYMENT_TARGET: 10.12
+        MACOSX_DEPLOYMENT_TARGET: 10.15
       run: |
         cmake -Bbuild -GXcode '-DCMAKE_OSX_ARCHITECTURES=arm64;x86_64'
         cmake --build build --config Release
@@ -36,14 +36,14 @@
 
     # Upload ninja binary archive as an artifact
     - name: Upload artifact
-      uses: actions/upload-artifact@v1
+      uses: actions/upload-artifact@v3
       with:
         name: ninja-binary-archives
         path: artifact
 
     - name: Upload release asset
       if: github.event.action == 'published'
-      uses: actions/upload-release-asset@v1.0.1
+      uses: actions/upload-release-asset@v1
       env:
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       with:
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index e4fe7bd..e169eb4 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -10,6 +10,15 @@
   build:
     runs-on: windows-latest
 
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+        - arch: 'x64'
+          suffix: ''
+        - arch: 'arm64'
+          suffix: 'arm64'
+
     steps:
     - uses: actions/checkout@v2
 
@@ -19,15 +28,17 @@
     - name: Build ninja
       shell: bash
       run: |
-        cmake -Bbuild
+        cmake -Bbuild -A ${{ matrix.arch }}
         cmake --build build --parallel --config Debug
         cmake --build build --parallel --config Release
 
     - name: Test ninja (Debug)
+      if: matrix.arch != 'arm64'
       run: .\ninja_test.exe
       working-directory: build/Debug
 
     - name: Test ninja (Release)
+      if: matrix.arch != 'arm64'
       run: .\ninja_test.exe
       working-directory: build/Release
 
@@ -35,22 +46,22 @@
       shell: bash
       run: |
         mkdir artifact
-        7z a artifact/ninja-win.zip ./build/Release/ninja.exe
+        7z a artifact/ninja-win${{ matrix.suffix }}.zip ./build/Release/ninja.exe
 
     # Upload ninja binary archive as an artifact
     - name: Upload artifact
-      uses: actions/upload-artifact@v1
+      uses: actions/upload-artifact@v3
       with:
         name: ninja-binary-archives
         path: artifact
 
     - name: Upload release asset
       if: github.event.action == 'published'
-      uses: actions/upload-release-asset@v1.0.1
+      uses: actions/upload-release-asset@v1
       env:
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       with:
         upload_url: ${{ github.event.release.upload_url }}
-        asset_path: ./artifact/ninja-win.zip
-        asset_name: ninja-win.zip
+        asset_path: ./artifact/ninja-win${{ matrix.suffix }}.zip
+        asset_name: ninja-win${{ matrix.suffix }}.zip
         asset_content_type: application/zip
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 70fc5e9..90e3418 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,7 +3,10 @@
 include(CheckSymbolExists)
 include(CheckIPOSupported)
 
-project(ninja)
+option(NINJA_BUILD_BINARY "Build ninja binary" ON)
+option(NINJA_FORCE_PSELECT "Use pselect() even on platforms that provide ppoll()" OFF)
+
+project(ninja CXX)
 
 # --- optional link-time optimization
 check_ipo_supported(RESULT lto_supported OUTPUT error)
@@ -19,6 +22,8 @@
 if(MSVC)
 	set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
 	string(REPLACE "/GR" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
+	# Note that these settings are separately specified in configure.py, and
+	# these lists should be kept in sync.
 	add_compile_options(/W4 /wd4100 /wd4267 /wd4706 /wd4702 /wd4244 /GR- /Zc:__cplusplus)
 	add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
 else()
@@ -31,11 +36,34 @@
 	if(flag_color_diag)
 		add_compile_options(-fdiagnostics-color)
 	endif()
+
+	if(NOT NINJA_FORCE_PSELECT)
+		# Check whether ppoll() is usable on the target platform.
+		# Set -DUSE_PPOLL=1 if this is the case.
+		#
+		# NOTE: Use check_cxx_symbol_exists() instead of check_symbol_exists()
+		# because on Linux, <poll.h> only exposes the symbol when _GNU_SOURCE
+		# is defined.
+		#
+		# Both g++ and clang++ define the symbol by default, because the C++
+		# standard library headers require it, but *not* gcc and clang, which
+		# are used by check_symbol_exists().
+		include(CheckCXXSymbolExists)
+		check_cxx_symbol_exists(ppoll poll.h HAVE_PPOLL)
+		if(HAVE_PPOLL)
+			add_compile_definitions(USE_PPOLL=1)
+		endif()
+	endif()
 endif()
 
 # --- optional re2c
+set(RE2C_MAJOR_VERSION 0)
 find_program(RE2C re2c)
 if(RE2C)
+	execute_process(COMMAND "${RE2C}" --vernum OUTPUT_VARIABLE RE2C_RAW_VERSION)
+	math(EXPR RE2C_MAJOR_VERSION "${RE2C_RAW_VERSION} / 10000")
+endif()
+if(${RE2C_MAJOR_VERSION} GREATER 1)
 	# the depfile parser and ninja lexers are generated using re2c.
 	function(re2c IN OUT)
 		add_custom_command(DEPENDS ${IN} OUTPUT ${OUT}
@@ -46,7 +74,7 @@
 	re2c(${PROJECT_SOURCE_DIR}/src/lexer.in.cc ${PROJECT_BINARY_DIR}/lexer.cc)
 	add_library(libninja-re2c OBJECT ${PROJECT_BINARY_DIR}/depfile_parser.cc ${PROJECT_BINARY_DIR}/lexer.cc)
 else()
-	message(WARNING "re2c was not found; changes to src/*.in.cc will not affect your build.")
+	message(WARNING "re2c 2 or later was not found; changes to src/*.in.cc will not affect your build.")
 	add_library(libninja-re2c OBJECT src/depfile_parser.cc src/lexer.cc)
 endif()
 target_include_directories(libninja-re2c PRIVATE src)
@@ -86,6 +114,8 @@
 
 endfunction()
 
+set(NINJA_PYTHON "python" CACHE STRING "Python interpreter to use for the browse tool")
+
 check_platform_supports_browse_mode(platform_supports_ninja_browse)
 
 # Core source files all build into ninja library.
@@ -124,10 +154,18 @@
 		src/getopt.c
 		src/minidump-win32.cc
 	)
+	# Build getopt.c, which can be compiled as either C or C++, as C++
+	# so that build environments which lack a C compiler, but have a C++
+	# compiler may build ninja.
+	set_source_files_properties(src/getopt.c PROPERTIES LANGUAGE CXX)
 else()
 	target_sources(libninja PRIVATE src/subprocess-posix.cc)
 	if(CMAKE_SYSTEM_NAME STREQUAL "OS400" OR CMAKE_SYSTEM_NAME STREQUAL "AIX")
 		target_sources(libninja PRIVATE src/getopt.c)
+		# Build getopt.c, which can be compiled as either C or C++, as C++
+		# so that build environments which lack a C compiler, but have a C++
+		# compiler may build ninja.
+		set_source_files_properties(src/getopt.c PROPERTIES LANGUAGE CXX)
 	endif()
 
 	# Needed for perfstat_cpu_total
@@ -136,6 +174,8 @@
 	endif()
 endif()
 
+target_compile_features(libninja PUBLIC cxx_std_11)
+
 #Fixes GetActiveProcessorCount on MinGW
 if(MINGW)
 target_compile_definitions(libninja PRIVATE _WIN32_WINNT=0x0601 __USE_MINGW_ANSI_STDIO=1)
@@ -148,11 +188,13 @@
 endif()
 
 # Main executable is library plus main() function.
-add_executable(ninja src/ninja.cc)
-target_link_libraries(ninja PRIVATE libninja libninja-re2c)
+if(NINJA_BUILD_BINARY)
+	add_executable(ninja src/ninja.cc)
+	target_link_libraries(ninja PRIVATE libninja libninja-re2c)
 
-if(WIN32)
-  target_sources(ninja PRIVATE windows/ninja.manifest)
+	if(WIN32)
+		target_sources(ninja PRIVATE windows/ninja.manifest)
+	endif()
 endif()
 
 # Adds browse mode into the ninja binary if it's supported by the host platform.
@@ -171,18 +213,58 @@
 		VERBATIM
 	)
 
-	target_compile_definitions(ninja PRIVATE NINJA_HAVE_BROWSE)
-	target_sources(ninja PRIVATE src/browse.cc)
+	if(NINJA_BUILD_BINARY)
+		target_compile_definitions(ninja PRIVATE NINJA_HAVE_BROWSE)
+		target_sources(ninja PRIVATE src/browse.cc)
+	endif()
 	set_source_files_properties(src/browse.cc
 		PROPERTIES
 			OBJECT_DEPENDS "${PROJECT_BINARY_DIR}/build/browse_py.h"
 			INCLUDE_DIRECTORIES "${PROJECT_BINARY_DIR}"
-			COMPILE_DEFINITIONS NINJA_PYTHON="python"
+			COMPILE_DEFINITIONS NINJA_PYTHON="${NINJA_PYTHON}"
 	)
 endif()
 
 include(CTest)
 if(BUILD_TESTING)
+  find_package(GTest)
+  if(NOT GTest_FOUND)
+    include(FetchContent)
+    FetchContent_Declare(
+      googletest
+      URL https://github.com/google/googletest/archive/release-1.10.0.tar.gz
+      URL_HASH SHA1=9c89be7df9c5e8cb0bc20b3c4b39bf7e82686770
+    )
+    FetchContent_MakeAvailable(googletest)
+
+    # Before googletest-1.11.0, the CMake files provided by the source archive
+    # did not define the GTest::gtest target, only the gtest one, so define
+    # an alias when needed to ensure the rest of this file works with all
+    # GoogleTest releases.
+    #
+    # Note that surprisingly, this is not needed when using GTEST_ROOT to
+    # point to a local installation, because this one contains CMake-generated
+    # files that contain the right target definition, and which will be
+    # picked up by the find_package(GTest) file above.
+    #
+    # This comment and the four lines below can be removed once Ninja only
+    # depends on release-1.11.0 or above.
+    if (NOT TARGET GTest::gtest)
+      message(STATUS "Defining GTest::gtest alias to work-around bug in older release.")
+      add_library(GTest::gtest ALIAS gtest)
+
+      # NOTE: googletest 1.10.0 leaves some variables uninitialized, which can
+      # trigger -Wmaybe-uninitialized warnings with newer GCC.
+      # Remove this comment and the six lines below once Ninja depends on
+      # gtest 1.11.0 or later.
+      if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "1.11.3")
+        check_cxx_compiler_flag(-Wmaybe-uninitialized flag_maybe_uninit)
+        if (flag_maybe_uninit)
+          target_compile_options(gtest PRIVATE -Wno-maybe-uninitialized)
+        endif()
+      endif()
+
+    endif()
+  endif()
+
   # Tests all build into ninja_test executable.
   add_executable(ninja_test
     src/build_log_test.cc
@@ -207,9 +289,11 @@
     src/util_test.cc
   )
   if(WIN32)
-    target_sources(ninja_test PRIVATE src/includes_normalize_test.cc src/msvc_helper_test.cc)
+    target_sources(ninja_test PRIVATE src/includes_normalize_test.cc src/msvc_helper_test.cc
+      windows/ninja.manifest)
   endif()
-  target_link_libraries(ninja_test PRIVATE libninja libninja-re2c)
+  find_package(Threads REQUIRED)
+  target_link_libraries(ninja_test PRIVATE libninja libninja-re2c GTest::gtest Threads::Threads)
 
   foreach(perftest
     build_log_perftest
@@ -232,4 +316,6 @@
   add_test(NAME NinjaTest COMMAND ninja_test)
 endif()
 
-install(TARGETS ninja)
+if(NINJA_BUILD_BINARY)
+	install(TARGETS ninja)
+endif()
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index be1fc02..37f6ebc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -14,14 +14,10 @@
 [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html) with
 a few additions:
 
-* Any code merged into the Ninja codebase which will be part of the main
-  executable must compile as C++03. You may use C++11 features in a test or an
-  unimportant tool if you guard your code with `#if __cplusplus >= 201103L`.
 * We have used `using namespace std;` a lot in the past. For new contributions,
   please try to avoid relying on it and instead whenever possible use `std::`.
   However, please do not change existing code simply to add `std::` unless your
   contribution already needs to change that line of code anyway.
-* All source files should have the Google Inc. license header.
 * Use `///` for [Doxygen](http://www.doxygen.nl/) (use `\a` to refer to
   arguments).
 * It's not necessary to document each argument, especially when they're
diff --git a/README.md b/README.md
index d763766..732ef28 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@
 `doc/manual.asciidoc` included in the distribution for background
 and more details.
 
-Binaries for Linux, Mac, and Windows are available at
+Binaries for Linux, Mac and Windows are available on
   [GitHub](https://github.com/ninja-build/ninja/releases).
 Run `./ninja -h` for Ninja help.
 
@@ -49,3 +49,39 @@
 ```
 ./build-cmake/ninja_test
 ```
+
+## Generating documentation
+
+### Ninja Manual
+
+You must have `asciidoc` and `xsltproc` in your PATH, then do:
+
+```
+./configure.py
+ninja manual
+```
+
+Which will generate `doc/manual.html`.
+
+To generate the PDF version of the manual, you must have `dblatex` in your PATH, then do:
+
+```
+./configure.py    # only if you didn't do it previously.
+ninja doc/manual.pdf
+```
+
+Which will generate `doc/manual.pdf`.
+
+### Doxygen documentation
+
+If you have `doxygen` installed, you can build documentation extracted from C++
+declarations and comments to help you navigate the code. Note that Ninja is a standalone
+executable, not a library, so there is no public API; all details exposed here are
+internal.
+
+```
+./configure.py   # if needed
+ninja doxygen
+```
+
+Then open `doc/doxygen/html/index.html` in a browser to look at it.
diff --git a/appveyor.yml b/appveyor.yml
index f0b92b8..505e142 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -1,24 +1,21 @@
 version: 1.0.{build}
 image:
   - Visual Studio 2017
-  - Ubuntu1804
+  - Ubuntu2204
 
 environment:
   CLICOLOR_FORCE: 1
   CHERE_INVOKING: 1 # Tell Bash to inherit the current working directory
   matrix:
     - MSYSTEM: MINGW64
-    - MSYSTEM: MSVC
     - MSYSTEM: LINUX
 
 matrix:
   exclude:
     - image: Visual Studio 2017
       MSYSTEM: LINUX
-    - image: Ubuntu1804
+    - image: Ubuntu2204
       MSYSTEM: MINGW64
-    - image: Ubuntu1804
-      MSYSTEM: MSVC
 
 for:
   -
@@ -30,31 +27,13 @@
       pacman -S --quiet --noconfirm --needed re2c 2>&1\n
       ./configure.py --bootstrap --platform mingw 2>&1\n
       ./ninja all\n
-      ./ninja_test 2>&1\n
       ./misc/ninja_syntax_test.py 2>&1\n\"@"
-  -
-    matrix:
-      only:
-        - MSYSTEM: MSVC
-    build_script:
-    - cmd: >-
-        call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvars64.bat"
-
-        python configure.py --bootstrap
-
-        ninja.bootstrap.exe all
-
-        ninja_test
-
-        python misc/ninja_syntax_test.py
-
   - matrix:
       only:
-        - image: Ubuntu1804
+        - image: Ubuntu2204
     build_script:
       - ./configure.py --bootstrap
       - ./ninja all
-      - ./ninja_test
       - misc/ninja_syntax_test.py
       - misc/output_test.py
 
diff --git a/configure.py b/configure.py
index 4390434..6ee64a8 100755
--- a/configure.py
+++ b/configure.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 #
 # Copyright 2001 Google Inc. All Rights Reserved.
 #
@@ -19,12 +19,9 @@
 Projects that use ninja themselves should either write a similar script
 or use a meta-build system that supports Ninja output."""
 
-from __future__ import print_function
-
 from optparse import OptionParser
 import os
-import pipes
-import string
+import shlex
 import subprocess
 import sys
 
@@ -264,7 +261,7 @@
 env_keys = set(['CXX', 'AR', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS'])
 configure_env = dict((k, os.environ[k]) for k in os.environ if k in env_keys)
 if configure_env:
-    config_str = ' '.join([k + '=' + pipes.quote(configure_env[k])
+    config_str = ' '.join([k + '=' + shlex.quote(configure_env[k])
                            for k in configure_env])
     n.variable('configure_env', config_str + '$ ')
 n.newline()
@@ -305,7 +302,18 @@
 else:
     n.variable('ar', configure_env.get('AR', 'ar'))
 
+def search_system_path(file_name):
+  """Find a file in the system path."""
+  for dir in os.environ['path'].split(';'):
+    path = os.path.join(dir, file_name)
+    if os.path.exists(path):
+      return path
+
+# Note that build settings are separately specified in CMakeLists.txt and
+# these lists should be kept in sync.
 if platform.is_msvc():
+    if not search_system_path('cl.exe'):
+        raise Exception('cl.exe not found. Run again from the Developer Command Prompt for VS')
     cflags = ['/showIncludes',
               '/nologo',  # Don't print startup banner.
               '/Zi',  # Create pdb with debug info.
@@ -320,6 +328,7 @@
               # Disable warnings about ignored typedef in DbgHelp.h
               '/wd4091',
               '/GR-',  # Disable RTTI.
+              '/Zc:__cplusplus',
               # Disable size_t -> int truncation warning.
               # We never have strings or arrays larger than 2**31.
               '/wd4267',
@@ -339,6 +348,7 @@
               '-Wno-unused-parameter',
               '-fno-rtti',
               '-fno-exceptions',
+              '-std=c++11',
               '-fvisibility=hidden', '-pipe',
               '-DNINJA_PYTHON="%s"' % options.with_python]
     if options.debug:
@@ -474,7 +484,7 @@
 def has_re2c():
     try:
         proc = subprocess.Popen(['re2c', '-V'], stdout=subprocess.PIPE)
-        return int(proc.communicate()[0], 10) >= 1103
+        return int(proc.communicate()[0], 10) >= 1503
     except OSError:
         return False
 if has_re2c():
@@ -485,20 +495,31 @@
     n.build(src('depfile_parser.cc'), 're2c', src('depfile_parser.in.cc'))
     n.build(src('lexer.cc'), 're2c', src('lexer.in.cc'))
 else:
-    print("warning: A compatible version of re2c (>= 0.11.3) was not found; "
+    print("warning: A compatible version of re2c (>= 0.15.3) was not found; "
            "changes to src/*.in.cc will not affect your build.")
 n.newline()
 
-n.comment('Core source files all build into ninja library.')
 cxxvariables = []
 if platform.is_msvc():
     cxxvariables = [('pdb', 'ninja.pdb')]
+
+n.comment('Generate a library for `ninja-re2c`.')
+re2c_objs = []
+for name in ['depfile_parser', 'lexer']:
+    re2c_objs += cxx(name, variables=cxxvariables)
+if platform.is_msvc():
+    n.build(built('ninja-re2c.lib'), 'ar', re2c_objs)
+else:
+    n.build(built('libninja-re2c.a'), 'ar', re2c_objs)
+n.newline()
+
+n.comment('Core source files all build into ninja library.')
+objs.extend(re2c_objs)
 for name in ['build',
              'build_log',
              'clean',
              'clparser',
              'debug_flags',
-             'depfile_parser',
              'deps_log',
              'disk_interface',
              'dyndep',
@@ -508,7 +529,6 @@
              'graph',
              'graphviz',
              'json',
-             'lexer',
              'line_printer',
              'manifest_parser',
              'metrics',
@@ -562,44 +582,6 @@
     # build.ninja file.
     n = ninja_writer
 
-n.comment('Tests all build into ninja_test executable.')
-
-objs = []
-if platform.is_msvc():
-    cxxvariables = [('pdb', 'ninja_test.pdb')]
-
-for name in ['build_log_test',
-             'build_test',
-             'clean_test',
-             'clparser_test',
-             'depfile_parser_test',
-             'deps_log_test',
-             'dyndep_parser_test',
-             'disk_interface_test',
-             'edit_distance_test',
-             'graph_test',
-             'json_test',
-             'lexer_test',
-             'manifest_parser_test',
-             'missing_deps_test',
-             'ninja_test',
-             'state_test',
-             'status_test',
-             'string_piece_util_test',
-             'subprocess_test',
-             'test',
-             'util_test']:
-    objs += cxx(name, variables=cxxvariables)
-if platform.is_windows():
-    for name in ['includes_normalize_test', 'msvc_helper_test']:
-        objs += cxx(name, variables=cxxvariables)
-
-ninja_test = n.build(binary('ninja_test'), 'link', objs, implicit=ninja_lib,
-                     variables=[('libs', libs)])
-n.newline()
-all_targets += ninja_test
-
-
 n.comment('Ancillary executables.')
 
 if platform.is_aix() and '-maix64' not in ldflags:
diff --git a/doc/manual.asciidoc b/doc/manual.asciidoc
index 659c68b..d7dc490 100644
--- a/doc/manual.asciidoc
+++ b/doc/manual.asciidoc
@@ -1,6 +1,6 @@
 The Ninja build system
 ======================
-v1.11.1, Aug 2022
+v1.12.0, Apr 2024
 
 
 Introduction
@@ -25,7 +25,7 @@
 an assembler.
 
 Build systems get slow when they need to make decisions.  When you are
-in a edit-compile cycle you want it to be as fast as possible -- you
+in an edit-compile cycle you want it to be as fast as possible -- you
 want the build system to do the minimum work necessary to figure out
 what needs to be built immediately.
 
@@ -205,7 +205,11 @@
 `%o`:: Overall rate of finished edges per second
 `%c`:: Current rate of finished edges per second (average over builds
 specified by `-j` or its default)
-`%e`:: Elapsed time in seconds.  _(Available since Ninja 1.2.)_
+`%e`:: Elapsed time in seconds. _(Available since Ninja 1.2.)_
+`%E`:: Remaining time (ETA) in seconds. _(Available since Ninja 1.12.)_
+`%w`:: Elapsed time in [h:]mm:ss format. _(Available since Ninja 1.12.)_
+`%W`:: Remaining time (ETA) in [h:]mm:ss format. _(Available since Ninja 1.12.)_
+`%P`:: The percentage (in ppp% format) of time elapsed out of predicted total runtime. _(Available since Ninja 1.12.)_
 `%%`:: A plain `%` character.
 
 The default progress status is `"[%f/%t] "` (note the trailing space
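
As a rough illustration of the new placeholders (not part of the change itself), the format string is picked up from the `NINJA_STATUS` environment variable; the `build` directory below is only an example:

```
# Sketch: invoke ninja with the new 1.12 status placeholders via NINJA_STATUS.
# "%f/%t" = finished/total edges, "%P" = percent of predicted runtime,
# "%W" = remaining time as [h:]mm:ss.
import os
import subprocess

env = dict(os.environ, NINJA_STATUS="[%f/%t %P ETA %W] ")
subprocess.run(["ninja", "-C", "build"], env=env, check=True)
```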
@@ -223,14 +227,14 @@
 
 `browse`:: browse the dependency graph in a web browser.  Clicking a
 file focuses the view on that file, showing inputs and outputs.  This
-feature requires a Python installation. By default port 8000 is used
+feature requires a Python installation. By default, port 8000 is used
 and a web browser will be opened. This can be changed as follows:
 +
 ----
 ninja -t browse --port=8000 --no-browser mytarget
 ----
 +
-`graph`:: output a file in the syntax used by `graphviz`, a automatic
+`graph`:: output a file in the syntax used by `graphviz`, an automatic
 graph layout tool.  Use it like:
 +
 ----
@@ -262,7 +266,7 @@
 rebuild those targets.
 _Available since Ninja 1.11._
 
-`clean`:: remove built files. By default it removes all built files
+`clean`:: remove built files. By default, it removes all built files
 except for those created by the generator.  Adding the `-g` flag also
 removes built files created by the generator (see <<ref_rule,the rule
 reference for the +generator+ attribute>>).  Additional arguments are
@@ -325,20 +329,19 @@
 for CreateProcessA() on Windows (i.e. a series of zero-terminated strings that
 look like NAME=VALUE, followed by an extra zero terminator). Note that this uses
 the local codepage encoding.
-
++
 This tool also supports a deprecated way of parsing the compiler's output when
-the `/showIncludes` flag is used, and generating a GCC-compatible depfile from it.
+the `/showIncludes` flag is used, and generating a GCC-compatible depfile from it:
 +
----
+----
 ninja -t msvc -o DEPFILE [-p STRING] -- cl.exe /showIncludes <arguments>
----
+----
 +
-
 When using this option, `-p STRING` can be used to pass the localized line prefix
 that `cl.exe` uses to output dependency information. For English-speaking regions
 this is `"Note: including file: "` without the double quotes, but will be different
 for other regions.
-
++
 Note that Ninja supports this natively now, with the use of `deps = msvc` and
 `msvc_deps_prefix` in Ninja files. Native support also avoids launching an extra
 tool process each time the compiler must be called, which can speed up builds
@@ -675,14 +678,14 @@
    as a temporary).
 
 2. `deps = msvc` specifies that the tool outputs header dependencies
-   in the form produced by Visual Studio's compiler's
+   in the form produced by the Visual Studio compiler's
    http://msdn.microsoft.com/en-us/library/hdkef6tk(v=vs.90).aspx[`/showIncludes`
    flag].  Briefly, this means the tool outputs specially-formatted lines
    to its stdout.  Ninja then filters these lines from the displayed
    output.  No `depfile` attribute is necessary, but the localized string
-   in front of the the header file path. For instance
+   in front of the header file path should be globally defined. For instance,
    `msvc_deps_prefix = Note: including file:`
-   for a English Visual Studio (the default). Should be globally defined.
+   for an English Visual Studio (the default).
 +
 ----
 msvc_deps_prefix = Note: including file:
@@ -965,14 +968,14 @@
 
 On Unixes, commands are arrays of arguments.  The Ninja `command`
 variable is passed directly to `sh -c`, which is then responsible for
-interpreting that string into an argv array.  Therefore the quoting
+interpreting that string into an argv array.  Therefore, the quoting
 rules are those of the shell, and you can use all the normal shell
 operators, like `&&` to chain multiple commands, or `VAR=value cmd` to
 set environment variables.
 
 On Windows, commands are strings, so Ninja passes the `command` string
 directly to `CreateProcess`.  (In the common case of simply executing
-a compiler this means there is less overhead.)  Consequently the
+a compiler this means there is less overhead.)  Consequently, the
 quoting rules are determined by the called program, which on Windows
 are usually provided by the C library.  If you need shell
 interpretation of the command (such as the use of `&&` to chain
@@ -1047,6 +1050,9 @@
 [[validations]]
 Validations
 ~~~~~~~~~~~
+
+_Available since Ninja 1.11._
+
 Validations listed on the build line cause the specified files to be
 added to the top level of the build graph (as if they were specified
 on the Ninja command line) whenever the build line is a transitive
@@ -1062,7 +1068,7 @@
 edge depends on the first.
 
 Validations are designed to handle rules that perform error checking but
-don't produce any artifacts needed by the build, for example static
+don't produce any artifacts needed by the build, for example, static
 analysis tools.  Marking the static analysis rule as an implicit input
 of the main build rule of the source files or of the rules that depend
 on the main build rule would slow down the critical path of the build,
diff --git a/doc/style.css b/doc/style.css
index 9976c03..363e272 100644
--- a/doc/style.css
+++ b/doc/style.css
@@ -1,15 +1,22 @@
+:root {
+    color-scheme: light dark;
+}
+
 body {
     margin: 5ex 10ex;
     max-width: 80ex;
     line-height: 1.5;
     font-family: sans-serif;
 }
+
 h1, h2, h3 {
     font-weight: normal;
 }
+
 pre, code {
     font-family: x, monospace;
 }
+
 pre {
     padding: 1ex;
     background: #eee;
@@ -17,13 +24,32 @@
     min-width: 0;
     font-size: 90%;
 }
+@media (prefers-color-scheme: dark) {
+  pre {
+    background: #333;
+    border: solid 1px #444;
+  }
+}
+
 code {
     color: #007;
 }
+@media (prefers-color-scheme: dark) {
+  code {
+    color: #a7cec8;
+  }
+}
+
 div.chapter {
     margin-top: 4em;
     border-top: solid 2px black;
 }
+@media (prefers-color-scheme: dark) {
+  div.chapter {
+    border-top: solid 2px white;
+  }
+}
+
 p {
     margin-top: 0;
 }
diff --git a/misc/measure.py b/misc/measure.py
index 8ce95e6..f3825ef 100755
--- a/misc/measure.py
+++ b/misc/measure.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 # Copyright 2011 Google Inc. All Rights Reserved.
 #
@@ -17,8 +17,6 @@
 """measure the runtime of a command by repeatedly running it.
 """
 
-from __future__ import print_function
-
 import time
 import subprocess
 import sys
diff --git a/misc/ninja-mode.el b/misc/ninja-mode.el
index 8b975d5..d4f06e6 100644
--- a/misc/ninja-mode.el
+++ b/misc/ninja-mode.el
@@ -19,16 +19,22 @@
 ;;; Commentary:
 
 ;; Simple emacs mode for editing .ninja files.
-;; Just some syntax highlighting for now.
 
 ;;; Code:
 
+(defcustom ninja-indent-offset 2
+  "*Amount of offset per level of indentation."
+  :type 'integer
+  :safe 'natnump
+  :group 'ninja)
+
+(defconst ninja-keywords-re
+  (concat "^" (regexp-opt '("rule" "build" "subninja" "include" "pool" "default")
+                          'words)))
+
 (defvar ninja-keywords
-  `((,(concat "^" (regexp-opt '("rule" "build" "subninja" "include"
-                                "pool" "default")
-                              'words))
-     . font-lock-keyword-face)
-    ("\\([[:alnum:]_]+\\) =" 1 font-lock-variable-name-face)
+  `((,ninja-keywords-re . font-lock-keyword-face)
+    ("^[[:space:]]*\\([[:alnum:]_]+\\)[[:space:]]*=" 1 font-lock-variable-name-face)
     ;; Variable expansion.
     ("$[[:alnum:]_]+" . font-lock-variable-name-face)
     ("${[[:alnum:]._]+}" . font-lock-variable-name-face)
@@ -69,11 +75,30 @@
             (unless (= line-end (1+ (buffer-size)))
               (put-text-property line-end (1+ line-end) 'syntax-table '(12)))))))))
 
+(defun ninja-compute-indentation ()
+  "Calculate indentation for the current line."
+  (save-excursion
+    (beginning-of-line)
+    (if (or (looking-at ninja-keywords-re)
+            (= (line-number-at-pos) 1))
+        0
+      (forward-line -1)
+      (if (looking-at ninja-keywords-re)
+          ninja-indent-offset
+        (current-indentation)))))
+
+(defun ninja-indent-line ()
+  "Indent the current line.  Uses previous indentation level if
+ available or `ninja-indent-offset'"
+  (interactive "*")
+  (indent-line-to (ninja-compute-indentation)))
+
 ;;;###autoload
 (define-derived-mode ninja-mode prog-mode "ninja"
   (set (make-local-variable 'comment-start) "#")
   (set (make-local-variable 'parse-sexp-lookup-properties) t)
   (set (make-local-variable 'syntax-propertize-function) #'ninja-syntax-propertize)
+  (set (make-local-variable 'indent-line-function) 'ninja-indent-line)
   (setq font-lock-defaults '(ninja-keywords)))
 
 ;; Run ninja-mode for files ending in .ninja.
diff --git a/misc/ninja_syntax.py b/misc/ninja_syntax.py
index ca73b5b..2aa8456 100644
--- a/misc/ninja_syntax.py
+++ b/misc/ninja_syntax.py
@@ -23,37 +23,54 @@
 
 import re
 import textwrap
+from io import TextIOWrapper
+from typing import Dict, List, Match, Optional, Tuple, Union
 
-def escape_path(word):
+def escape_path(word: str) -> str:
     return word.replace('$ ', '$$ ').replace(' ', '$ ').replace(':', '$:')
 
 class Writer(object):
-    def __init__(self, output, width=78):
+    def __init__(self, output: TextIOWrapper, width: int = 78) -> None:
         self.output = output
         self.width = width
 
-    def newline(self):
+    def newline(self) -> None:
         self.output.write('\n')
 
-    def comment(self, text):
+    def comment(self, text: str) -> None:
         for line in textwrap.wrap(text, self.width - 2, break_long_words=False,
                                   break_on_hyphens=False):
             self.output.write('# ' + line + '\n')
 
-    def variable(self, key, value, indent=0):
+    def variable(
+        self,
+        key: str,
+        value: Optional[Union[bool, int, float, str, List[str]]],
+        indent: int = 0,
+    ) -> None:
         if value is None:
             return
         if isinstance(value, list):
             value = ' '.join(filter(None, value))  # Filter out empty strings.
         self._line('%s = %s' % (key, value), indent)
 
-    def pool(self, name, depth):
+    def pool(self, name: str, depth: int) -> None:
         self._line('pool %s' % name)
         self.variable('depth', depth, indent=1)
 
-    def rule(self, name, command, description=None, depfile=None,
-             generator=False, pool=None, restat=False, rspfile=None,
-             rspfile_content=None, deps=None):
+    def rule(
+        self,
+        name: str,
+        command: str,
+        description: Optional[str] = None,
+        depfile: Optional[str] = None,
+        generator: bool = False,
+        pool: Optional[str] = None,
+        restat: bool = False,
+        rspfile: Optional[str] = None,
+        rspfile_content: Optional[str] = None,
+        deps: Optional[Union[str, List[str]]] = None,
+    ) -> None:
         self._line('rule %s' % name)
         self.variable('command', command, indent=1)
         if description:
@@ -73,8 +90,23 @@
         if deps:
             self.variable('deps', deps, indent=1)
 
-    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
-              variables=None, implicit_outputs=None, pool=None, dyndep=None):
+    def build(
+        self,
+        outputs: Union[str, List[str]],
+        rule: str,
+        inputs: Optional[Union[str, List[str]]] = None,
+        implicit: Optional[Union[str, List[str]]] = None,
+        order_only: Optional[Union[str, List[str]]] = None,
+        variables: Optional[
+            Union[
+                List[Tuple[str, Optional[Union[str, List[str]]]]],
+                Dict[str, Optional[Union[str, List[str]]]],
+            ]
+        ] = None,
+        implicit_outputs: Optional[Union[str, List[str]]] = None,
+        pool: Optional[str] = None,
+        dyndep: Optional[str] = None,
+    ) -> List[str]:
         outputs = as_list(outputs)
         out_outputs = [escape_path(x) for x in outputs]
         all_inputs = [escape_path(x) for x in as_list(inputs)]
@@ -111,16 +143,16 @@
 
         return outputs
 
-    def include(self, path):
+    def include(self, path: str) -> None:
         self._line('include %s' % path)
 
-    def subninja(self, path):
+    def subninja(self, path: str) -> None:
         self._line('subninja %s' % path)
 
-    def default(self, paths):
+    def default(self, paths: Union[str, List[str]]) -> None:
         self._line('default %s' % ' '.join(as_list(paths)))
 
-    def _count_dollars_before_index(self, s, i):
+    def _count_dollars_before_index(self, s: str, i: int) -> int:
         """Returns the number of '$' characters right in front of s[i]."""
         dollar_count = 0
         dollar_index = i - 1
@@ -129,7 +161,7 @@
             dollar_index -= 1
         return dollar_count
 
-    def _line(self, text, indent=0):
+    def _line(self, text: str, indent: int = 0) -> None:
         """Write 'text' word-wrapped at self.width characters."""
         leading_space = '  ' * indent
         while len(leading_space) + len(text) > self.width:
@@ -165,11 +197,11 @@
 
         self.output.write(leading_space + text + '\n')
 
-    def close(self):
+    def close(self) -> None:
         self.output.close()
 
 
-def as_list(input):
+def as_list(input: Optional[Union[str, List[str]]]) -> List[str]:
     if input is None:
         return []
     if isinstance(input, list):
@@ -177,7 +209,7 @@
     return [input]
 
 
-def escape(string):
+def escape(string: str) -> str:
     """Escape a string such that it can be embedded into a Ninja file without
     further interpretation."""
     assert '\n' not in string, 'Ninja syntax does not allow newlines'
@@ -185,13 +217,13 @@
     return string.replace('$', '$$')
 
 
-def expand(string, vars, local_vars={}):
+def expand(string: str, vars: Dict[str, str], local_vars: Dict[str, str] = {}) -> str:
     """Expand a string containing $vars as Ninja would.
 
     Note: doesn't handle the full Ninja variable syntax, but it's enough
     to make configure.py's use of it work.
     """
-    def exp(m):
+    def exp(m: Match[str]) -> str:
         var = m.group(1)
         if var == '$':
             return '$'
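
The added annotations do not change how the module is driven; a minimal usage sketch of the `Writer` API follows (rule and file names are illustrative, and it assumes `misc/` is on the import path):

```
# Sketch: generate a small build.ninja with the annotated Writer API.
from ninja_syntax import Writer

with open("build.ninja", "w") as f:
    n = Writer(f)
    n.rule("cc", command="gcc -c $in -o $out", description="CC $out")
    n.build("foo.o", "cc", inputs="foo.c")
    n.default("foo.o")
```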
diff --git a/misc/ninja_syntax_test.py b/misc/ninja_syntax_test.py
index 90ff9c6..61fb177 100755
--- a/misc/ninja_syntax_test.py
+++ b/misc/ninja_syntax_test.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 # Copyright 2011 Google Inc. All Rights Reserved.
 #
diff --git a/misc/output_test.py b/misc/output_test.py
index 141716c..13b0926 100755
--- a/misc/output_test.py
+++ b/misc/output_test.py
@@ -13,29 +13,26 @@
 import unittest
 
 default_env = dict(os.environ)
-if 'NINJA_STATUS' in default_env:
-    del default_env['NINJA_STATUS']
-if 'CLICOLOR_FORCE' in default_env:
-    del default_env['CLICOLOR_FORCE']
+default_env.pop('NINJA_STATUS', None)
+default_env.pop('CLICOLOR_FORCE', None)
 default_env['TERM'] = ''
 NINJA_PATH = os.path.abspath('./ninja')
 
 def run(build_ninja, flags='', pipe=False, env=default_env):
     with tempfile.TemporaryDirectory() as d:
-        os.chdir(d)
-        with open('build.ninja', 'w') as f:
+        with open(os.path.join(d, 'build.ninja'), 'w') as f:
             f.write(build_ninja)
             f.flush()
         ninja_cmd = '{} {}'.format(NINJA_PATH, flags)
         try:
             if pipe:
-                output = subprocess.check_output([ninja_cmd], shell=True, env=env)
+                output = subprocess.check_output([ninja_cmd], shell=True, cwd=d, env=env)
             elif platform.system() == 'Darwin':
                 output = subprocess.check_output(['script', '-q', '/dev/null', 'bash', '-c', ninja_cmd],
-                                                 env=env)
+                                                 cwd=d, env=env)
             else:
                 output = subprocess.check_output(['script', '-qfec', ninja_cmd, '/dev/null'],
-                                                 env=env)
+                                                 cwd=d, env=env)
         except subprocess.CalledProcessError as err:
             sys.stdout.buffer.write(err.output)
             raise err
@@ -112,14 +109,51 @@
 \x1b[31mred\x1b[0m
 ''')
 
+    def test_issue_1966(self):
+        self.assertEqual(run(
+'''rule cat
+  command = cat $rspfile $rspfile > $out
+  rspfile = cat.rsp
+  rspfile_content = a b c
+
+build a: cat
+''', '-j3'),
+'''[1/1] cat cat.rsp cat.rsp > a\x1b[K
+''')
+
+
     def test_pr_1685(self):
         # Running those tools without .ninja_deps and .ninja_log shouldn't fail.
         self.assertEqual(run('', flags='-t recompact'), '')
         self.assertEqual(run('', flags='-t restat'), '')
 
+    def test_issue_2048(self):
+        with tempfile.TemporaryDirectory() as d:
+            with open(os.path.join(d, 'build.ninja'), 'w'):
+                pass
+
+            with open(os.path.join(d, '.ninja_log'), 'w') as f:
+                f.write('# ninja log v4\n')
+
+            try:
+                output = subprocess.check_output([NINJA_PATH, '-t', 'recompact'],
+                                                 cwd=d,
+                                                 env=default_env,
+                                                 stderr=subprocess.STDOUT,
+                                                 text=True
+                                                 )
+
+                self.assertEqual(
+                    output.strip(),
+                    "ninja: warning: build log version is too old; starting over"
+                )
+            except subprocess.CalledProcessError as err:
+                self.fail("non-zero exit code with: " + err.output)
+
     def test_status(self):
         self.assertEqual(run(''), 'ninja: no work to do.\n')
         self.assertEqual(run('', pipe=True), 'ninja: no work to do.\n')
+        self.assertEqual(run('', flags='--quiet'), '')
 
     def test_ninja_status_default(self):
         'Do we show the default status by default?'
diff --git a/misc/write_fake_manifests.py b/misc/write_fake_manifests.py
old mode 100644
new mode 100755
index abcb677..bf9cf7d
--- a/misc/write_fake_manifests.py
+++ b/misc/write_fake_manifests.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 """Writes large manifest files, for manifest parser performance testing.
 
diff --git a/misc/zsh-completion b/misc/zsh-completion
index 4cee3b8..d439df3 100644
--- a/misc/zsh-completion
+++ b/misc/zsh-completion
@@ -16,7 +16,7 @@
 # Add the following to your .zshrc to tab-complete ninja targets
 #   fpath=(path/to/ninja/misc/zsh-completion $fpath)
 
-__get_targets() {
+(( $+functions[_ninja-get-targets] )) || _ninja-get-targets() {
   dir="."
   if [ -n "${opt_args[-C]}" ];
   then
@@ -31,42 +31,45 @@
   eval ${targets_command} 2>/dev/null | cut -d: -f1
 }
 
-__get_tools() {
-  ninja -t list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2
+(( $+functions[_ninja-get-tools] )) || _ninja-get-tools() {
+  # remove the first line; remove the leading spaces; replace spaces with colon
+  ninja -t list 2> /dev/null | sed -e '1d;s/^ *//;s/ \+/:/'
 }
 
-__get_modes() {
-  ninja -d list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2 | sed '$d'
+(( $+functions[_ninja-get-modes] )) || _ninja-get-modes() {
+  # remove the first line; remove the last line; remove the leading spaces; replace spaces with colon
+  ninja -d list 2> /dev/null | sed -e '1d;$d;s/^ *//;s/ \+/:/'
 }
 
-__modes() {
+(( $+functions[_ninja-modes] )) || _ninja-modes() {
   local -a modes
-  modes=(${(fo)"$(__get_modes)"})
+  modes=(${(fo)"$(_ninja-get-modes)"})
   _describe 'modes' modes
 }
 
-__tools() {
+(( $+functions[_ninja-tools] )) || _ninja-tools() {
   local -a tools
-  tools=(${(fo)"$(__get_tools)"})
+  tools=(${(fo)"$(_ninja-get-tools)"})
   _describe 'tools' tools
 }
 
-__targets() {
+(( $+functions[_ninja-targets] )) || _ninja-targets() {
   local -a targets
-  targets=(${(fo)"$(__get_targets)"})
+  targets=(${(fo)"$(_ninja-get-targets)"})
   _describe 'targets' targets
 }
 
 _arguments \
-  {-h,--help}'[Show help]' \
-  '--version[Print ninja version]' \
+  '(- *)'{-h,--help}'[Show help]' \
+  '(- *)--version[Print ninja version]' \
   '-C+[Change to directory before doing anything else]:directories:_directories' \
   '-f+[Specify input build file (default=build.ninja)]:files:_files' \
   '-j+[Run N jobs in parallel (default=number of CPUs available)]:number of jobs' \
   '-l+[Do not start new jobs if the load average is greater than N]:number of jobs' \
   '-k+[Keep going until N jobs fail (default=1)]:number of jobs' \
   '-n[Dry run (do not run commands but act like they succeeded)]' \
-  '-v[Show all command lines while building]' \
-  '-d+[Enable debugging (use -d list to list modes)]:modes:__modes' \
-  '-t+[Run a subtool (use -t list to list subtools)]:tools:__tools' \
-  '*::targets:__targets'
+  '(-v --verbose --quiet)'{-v,--verbose}'[Show all command lines while building]' \
+  "(-v --verbose --quiet)--quiet[Don't show progress status, just command output]" \
+  '-d+[Enable debugging (use -d list to list modes)]:modes:_ninja-modes' \
+  '-t+[Run a subtool (use -t list to list subtools)]:tools:_ninja-tools' \
+  '*::targets:_ninja-targets'
diff --git a/src/browse.cc b/src/browse.cc
index 76bee07..ac54207 100644
--- a/src/browse.cc
+++ b/src/browse.cc
@@ -71,8 +71,13 @@
     close(pipefd[0]);
 
     // Write the script file into the stdin of the Python process.
-    ssize_t len = write(pipefd[1], kBrowsePy, sizeof(kBrowsePy));
-    if (len < (ssize_t)sizeof(kBrowsePy))
+    // Only write n - 1 bytes, because Python 3.11 does not allow null
+    // bytes in source code anymore, so avoid writing the null string
+    // terminator.
+    // See https://github.com/python/cpython/issues/96670
+    auto kBrowsePyLength = sizeof(kBrowsePy) - 1;
+    ssize_t len = write(pipefd[1], kBrowsePy, kBrowsePyLength);
+    if (len < (ssize_t)kBrowsePyLength)
       perror("ninja: write");
     close(pipefd[1]);
     exit(0);
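
The behavior the comment refers to is easy to observe directly; a small sketch, assuming a Python 3.11+ interpreter is available as `sys.executable`:

```
# Sketch: Python 3.11+ rejects source code containing NUL bytes, so piping the
# C string terminator along with kBrowsePy makes the interpreter bail out.
import subprocess
import sys

proc = subprocess.run([sys.executable, "-"], input=b"print('ok')\x00")
print(proc.returncode)  # non-zero exit on 3.11+, where the NUL byte is rejected
```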
diff --git a/src/browse.py b/src/browse.py
index 653cbe9..b125e80 100755
--- a/src/browse.py
+++ b/src/browse.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 #
 # Copyright 2001 Google Inc. All Rights Reserved.
 #
@@ -20,8 +20,6 @@
 it when needed.
 """
 
-from __future__ import print_function
-
 try:
     import http.server as httpserver
     import socketserver
diff --git a/src/build.cc b/src/build.cc
index 6f11ed7..7e2ccfa 100644
--- a/src/build.cc
+++ b/src/build.cc
@@ -18,6 +18,8 @@
 #include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <climits>
+#include <stdint.h>
 #include <functional>
 
 #if defined(__SVR4) && defined(__sun)
@@ -46,7 +48,7 @@
   virtual ~DryRunCommandRunner() {}
 
   // Overridden from CommandRunner:
-  virtual bool CanRunMore() const;
+  virtual size_t CanRunMore() const;
   virtual bool StartCommand(Edge* edge);
   virtual bool WaitForCommand(Result* result);
 
@@ -54,8 +56,8 @@
   queue<Edge*> finished_;
 };
 
-bool DryRunCommandRunner::CanRunMore() const {
-  return true;
+size_t DryRunCommandRunner::CanRunMore() const {
+  return SIZE_MAX;
 }
 
 bool DryRunCommandRunner::StartCommand(Edge* edge) {
@@ -89,21 +91,27 @@
 }
 
 bool Plan::AddTarget(const Node* target, string* err) {
+  targets_.push_back(target);
   return AddSubTarget(target, NULL, err, NULL);
 }
 
 bool Plan::AddSubTarget(const Node* node, const Node* dependent, string* err,
                         set<Edge*>* dyndep_walk) {
   Edge* edge = node->in_edge();
-  if (!edge) {  // Leaf node.
-    if (node->dirty()) {
-      string referenced;
-      if (dependent)
-        referenced = ", needed by '" + dependent->path() + "',";
-      *err = "'" + node->path() + "'" + referenced + " missing "
-             "and no known rule to make it";
-    }
-    return false;
+  if (!edge) {
+     // Leaf node, this can be either a regular input from the manifest
+     // (e.g. a source file), or an implicit input from a depfile or dyndep
+     // file. In the first case, a dirty flag means the file is missing,
+     // and the build should stop. In the second, do not do anything here
+     // since there is no producing edge to add to the plan.
+     if (node->dirty() && !node->generated_by_dep_loader()) {
+       string referenced;
+       if (dependent)
+         referenced = ", needed by '" + dependent->path() + "',";
+       *err = "'" + node->path() + "'" + referenced +
+              " missing and no known rule to make it";
+     }
+     return false;
   }
 
   if (edge->outputs_ready())
@@ -123,8 +131,6 @@
   if (node->dirty() && want == kWantNothing) {
     want = kWantToStart;
     EdgeWanted(edge);
-    if (!dyndep_walk && edge->AllInputsReady())
-      ScheduleWork(want_ins.first);
   }
 
   if (dyndep_walk)
@@ -144,17 +150,20 @@
 
 void Plan::EdgeWanted(const Edge* edge) {
   ++wanted_edges_;
-  if (!edge->is_phony())
+  if (!edge->is_phony()) {
     ++command_edges_;
+    if (builder_)
+      builder_->status_->EdgeAddedToPlan(edge);
+  }
 }
 
 Edge* Plan::FindWork() {
   if (ready_.empty())
     return NULL;
-  EdgeSet::iterator e = ready_.begin();
-  Edge* edge = *e;
-  ready_.erase(e);
-  return edge;
+
+  Edge* work = ready_.top();
+  ready_.pop();
+  return work;
 }
 
 void Plan::ScheduleWork(map<Edge*, Want>::iterator want_e) {
@@ -175,7 +184,7 @@
     pool->RetrieveReadyEdges(&ready_);
   } else {
     pool->EdgeScheduled(*edge);
-    ready_.insert(edge);
+    ready_.push(edge);
   }
 }
 
@@ -294,8 +303,11 @@
 
         want_e->second = kWantNothing;
         --wanted_edges_;
-        if (!(*oe)->is_phony())
+        if (!(*oe)->is_phony()) {
           --command_edges_;
+          if (builder_)
+            builder_->status_->EdgeRemovedFromPlan(*oe);
+        }
       }
     }
   }
@@ -437,6 +449,121 @@
   }
 }
 
+namespace {
+
+template <typename T>
+struct SeenBefore {
+  std::set<const T*>* seen_;
+
+  SeenBefore(std::set<const T*>* seen) : seen_(seen) {}
+
+  bool operator() (const T* item) {
+    // Return true if the item has been seen before
+    return !seen_->insert(item).second;
+  }
+};
+
+// Heuristic for edge priority weighting.
+// Phony edges are free (0 cost), all other edges are weighted equally.
+int64_t EdgeWeightHeuristic(Edge *edge) {
+  return edge->is_phony() ? 0 : 1;
+}
+
+}  // namespace
+
+void Plan::ComputeCriticalPath() {
+  METRIC_RECORD("ComputeCriticalPath");
+  // Remove duplicate targets
+  {
+    std::set<const Node*> seen;
+    SeenBefore<Node> seen_before(&seen);
+    targets_.erase(std::remove_if(targets_.begin(), targets_.end(), seen_before),
+                   targets_.end());
+  }
+
+  // Use backflow algorithm to compute the critical path for all
+  // nodes, starting from the destination nodes.
+  // XXX: ignores pools
+  std::queue<Edge*> work_queue;        // Queue, for breadth-first traversal
+  // The set of edges currently in work_queue, to avoid duplicates.
+  std::set<const Edge*> active_edges;
+  SeenBefore<Edge> seen_edge(&active_edges);
+
+  for (size_t i = 0; i < targets_.size(); ++i) {
+    const Node* target = targets_[i];
+    if (Edge* in = target->in_edge()) {
+      int64_t edge_weight = EdgeWeightHeuristic(in);
+      in->set_critical_path_weight(
+          std::max<int64_t>(edge_weight, in->critical_path_weight()));
+      if (!seen_edge(in)) {
+        work_queue.push(in);
+      }
+    }
+  }
+
+  while (!work_queue.empty()) {
+    Edge* e = work_queue.front();
+    work_queue.pop();
+    // If the critical path of any dependent edges is updated, this
+    // edge may need to be processed again. So re-allow insertion.
+    active_edges.erase(e);
+
+    for (std::vector<Node*>::iterator it = e->inputs_.begin(),
+                                      end = e->inputs_.end();
+         it != end; ++it) {
+      Edge* in = (*it)->in_edge();
+      if (!in) {
+        continue;
+      }
+      // Only process edge if this node offers a higher weighted path
+      const int64_t edge_weight = EdgeWeightHeuristic(in);
+      const int64_t proposed_weight = e->critical_path_weight() + edge_weight;
+      if (proposed_weight > in->critical_path_weight()) {
+        in->set_critical_path_weight(proposed_weight);
+        if (!seen_edge(in)) {
+          work_queue.push(in);
+        }
+      }
+    }
+  }
+}
+
+void Plan::ScheduleInitialEdges() {
+  // Add ready edges to queue.
+  assert(ready_.empty());
+  std::set<Pool*> pools;
+
+  for (std::map<Edge*, Plan::Want>::iterator it = want_.begin(),
+           end = want_.end(); it != end; ++it) {
+    Edge* edge = it->first;
+    Plan::Want want = it->second;
+    if (!(want == kWantToStart && edge->AllInputsReady())) {
+      continue;
+    }
+
+    Pool* pool = edge->pool();
+    if (pool->ShouldDelayEdge()) {
+      pool->DelayEdge(edge);
+      pools.insert(pool);
+    } else {
+      ScheduleWork(it);
+    }
+  }
+
+  // Call RetrieveReadyEdges only once at the end so higher priority
+  // edges are retrieved first, not the ones that happen to be first
+  // in the want_ map.
+  for (std::set<Pool*>::iterator it=pools.begin(),
+           end = pools.end(); it != end; ++it) {
+    (*it)->RetrieveReadyEdges(&ready_);
+  }
+}
+
+void Plan::PrepareQueue() {
+  ComputeCriticalPath();
+  ScheduleInitialEdges();
+}
+
 void Plan::Dump() const {
   printf("pending: %d\n", (int)want_.size());
   for (map<Edge*, Want>::const_iterator e = want_.begin(); e != want_.end(); ++e) {
@@ -450,7 +577,7 @@
 struct RealCommandRunner : public CommandRunner {
   explicit RealCommandRunner(const BuildConfig& config) : config_(config) {}
   virtual ~RealCommandRunner() {}
-  virtual bool CanRunMore() const;
+  virtual size_t CanRunMore() const;
   virtual bool StartCommand(Edge* edge);
   virtual bool WaitForCommand(Result* result);
   virtual vector<Edge*> GetActiveEdges();
@@ -473,12 +600,26 @@
   subprocs_.Clear();
 }
 
-bool RealCommandRunner::CanRunMore() const {
+size_t RealCommandRunner::CanRunMore() const {
   size_t subproc_number =
       subprocs_.running_.size() + subprocs_.finished_.size();
-  return (int)subproc_number < config_.parallelism
-    && ((subprocs_.running_.empty() || config_.max_load_average <= 0.0f)
-        || GetLoadAverage() < config_.max_load_average);
+
+  int64_t capacity = config_.parallelism - subproc_number;
+
+  if (config_.max_load_average > 0.0f) {
+    int load_capacity = config_.max_load_average - GetLoadAverage();
+    if (load_capacity < capacity)
+      capacity = load_capacity;
+  }
+
+  if (capacity < 0)
+    capacity = 0;
+
+  if (capacity == 0 && subprocs_.running_.empty())
+    // Ensure that we make progress.
+    capacity = 1;
+
+  return capacity;
 }
 
 bool RealCommandRunner::StartCommand(Edge* edge) {
@@ -518,6 +659,10 @@
       start_time_millis_(start_time_millis), disk_interface_(disk_interface),
       scan_(state, build_log, deps_log, disk_interface,
             &config_.depfile_parser_options) {
+  lock_file_path_ = ".ninja_lock";
+  string build_dir = state_->bindings_.LookupVariable("builddir");
+  if (!build_dir.empty())
+    lock_file_path_ = build_dir + "/" + lock_file_path_;
 }
 
 Builder::~Builder() {
@@ -552,6 +697,10 @@
         disk_interface_->RemoveFile(depfile);
     }
   }
+
+  string err;
+  if (disk_interface_->Stat(lock_file_path_, &err) > 0)
+    disk_interface_->RemoveFile(lock_file_path_);
 }
 
 Node* Builder::AddTarget(const string& name, string* err) {
@@ -598,8 +747,8 @@
 
 bool Builder::Build(string* err) {
   assert(!AlreadyUpToDate());
+  plan_.PrepareQueue();
 
-  status_->PlanHasTotalEdges(plan_.command_edge_count());
   int pending_commands = 0;
   int failures_allowed = config_.failures_allowed;
 
@@ -621,8 +770,13 @@
   // Second, we attempt to wait for / reap the next finished command.
   while (plan_.more_to_do()) {
     // See if we can start any more commands.
-    if (failures_allowed && command_runner_->CanRunMore()) {
-      if (Edge* edge = plan_.FindWork()) {
+    if (failures_allowed) {
+      size_t capacity = command_runner_->CanRunMore();
+      while (capacity > 0) {
+        Edge* edge = plan_.FindWork();
+        if (!edge)
+          break;
+
         if (edge->GetBindingBool("generator")) {
           scan_.build_log()->Close();
         }
@@ -641,11 +795,19 @@
           }
         } else {
           ++pending_commands;
-        }
 
-        // We made some progress; go back to the main loop.
-        continue;
+          --capacity;
+
+          // Re-evaluate capacity.
+          size_t current_capacity = command_runner_->CanRunMore();
+          if (current_capacity < capacity)
+            capacity = current_capacity;
+        }
       }
+
+      // We are finished with all work items and have no pending
+      // commands. Therefore, break out of the main loop.
+      if (pending_commands == 0 && !plan_.more_to_do()) break;
     }
 
     // See if we can reap any finished commands.
@@ -704,14 +866,25 @@
 
   status_->BuildEdgeStarted(edge, start_time_millis);
 
-  // Create directories necessary for outputs.
+  TimeStamp build_start = -1;
+
+  // Create directories necessary for outputs and remember the current
+  // filesystem mtime to record later.
   // XXX: this will block; do we care?
   for (vector<Node*>::iterator o = edge->outputs_.begin();
        o != edge->outputs_.end(); ++o) {
     if (!disk_interface_->MakeDirs((*o)->path()))
       return false;
+    if (build_start == -1) {
+      disk_interface_->WriteFile(lock_file_path_, "");
+      build_start = disk_interface_->Stat(lock_file_path_, err);
+      if (build_start == -1)
+        build_start = 0;
+    }
   }
 
+  edge->command_start_time_ = build_start;
+
   // Create response file, if needed
   // XXX: this may also block; do we care?
   string rspfile = edge->GetUnescapedRspfile();
@@ -761,8 +934,8 @@
   end_time_millis = GetTimeMillis() - start_time_millis_;
   running_edges_.erase(it);
 
-  status_->BuildEdgeFinished(edge, end_time_millis, result->success(),
-                             result->output);
+  status_->BuildEdgeFinished(edge, start_time_millis, end_time_millis,
+                             result->success(), result->output);
 
   // The rest of this function only applies to successful commands.
   if (!result->success()) {
@@ -770,55 +943,38 @@
   }
 
   // Restat the edge outputs
-  TimeStamp output_mtime = 0;
-  bool restat = edge->GetBindingBool("restat");
+  TimeStamp record_mtime = 0;
   if (!config_.dry_run) {
+    const bool restat = edge->GetBindingBool("restat");
+    const bool generator = edge->GetBindingBool("generator");
     bool node_cleaned = false;
+    record_mtime = edge->command_start_time_;
 
-    for (vector<Node*>::iterator o = edge->outputs_.begin();
-         o != edge->outputs_.end(); ++o) {
-      TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), err);
-      if (new_mtime == -1)
-        return false;
-      if (new_mtime > output_mtime)
-        output_mtime = new_mtime;
-      if ((*o)->mtime() == new_mtime && restat) {
-        // The rule command did not change the output.  Propagate the clean
-        // state through the build graph.
-        // Note that this also applies to nonexistent outputs (mtime == 0).
-        if (!plan_.CleanNode(&scan_, *o, err))
+    // Restat and generator rules must restat the outputs after the build
+    // has finished. If record_mtime == 0, then there was an error while
+    // attempting to touch/stat the temp file when the edge started, and
+    // we should fall back to recording the outputs' current mtime in the
+    // log.
+    if (record_mtime == 0 || restat || generator) {
+      for (vector<Node*>::iterator o = edge->outputs_.begin();
+           o != edge->outputs_.end(); ++o) {
+        TimeStamp new_mtime = disk_interface_->Stat((*o)->path(), err);
+        if (new_mtime == -1)
           return false;
-        node_cleaned = true;
+        if (new_mtime > record_mtime)
+          record_mtime = new_mtime;
+        if ((*o)->mtime() == new_mtime && restat) {
+          // The rule command did not change the output.  Propagate the clean
+          // state through the build graph.
+          // Note that this also applies to nonexistent outputs (mtime == 0).
+          if (!plan_.CleanNode(&scan_, *o, err))
+            return false;
+          node_cleaned = true;
+        }
       }
     }
-
     if (node_cleaned) {
-      TimeStamp restat_mtime = 0;
-      // If any output was cleaned, find the most recent mtime of any
-      // (existing) non-order-only input or the depfile.
-      for (vector<Node*>::iterator i = edge->inputs_.begin();
-           i != edge->inputs_.end() - edge->order_only_deps_; ++i) {
-        TimeStamp input_mtime = disk_interface_->Stat((*i)->path(), err);
-        if (input_mtime == -1)
-          return false;
-        if (input_mtime > restat_mtime)
-          restat_mtime = input_mtime;
-      }
-
-      string depfile = edge->GetUnescapedDepfile();
-      if (restat_mtime != 0 && deps_type.empty() && !depfile.empty()) {
-        TimeStamp depfile_mtime = disk_interface_->Stat(depfile, err);
-        if (depfile_mtime == -1)
-          return false;
-        if (depfile_mtime > restat_mtime)
-          restat_mtime = depfile_mtime;
-      }
-
-      // The total number of edges in the plan may have changed as a result
-      // of a restat.
-      status_->PlanHasTotalEdges(plan_.command_edge_count());
-
-      output_mtime = restat_mtime;
+      record_mtime = edge->command_start_time_;
     }
   }
 
@@ -832,7 +988,7 @@
 
   if (scan_.build_log()) {
     if (!scan_.build_log()->RecordCommand(edge, start_time_millis,
-                                          end_time_millis, output_mtime)) {
+                                          end_time_millis, record_mtime)) {
       *err = string("Error writing to build log: ") + strerror(errno);
       return false;
     }
@@ -932,8 +1088,5 @@
   if (!plan_.DyndepsLoaded(&scan_, node, ddf, err))
     return false;
 
-  // New command edges may have been added to the plan.
-  status_->PlanHasTotalEdges(plan_.command_edge_count());
-
   return true;
 }
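The build.cc changes above tie together two ideas: CanRunMore() now reports how many more commands may start (a capacity) instead of a plain yes/no, and each edge records the mtime captured at command start via a scratch ".ninja_lock" file, so an input modified while the command runs looks newer than the recorded value on the next build. Below is a minimal, stand-alone C++ sketch of that timestamp capture; CaptureBuildStart is an illustrative helper, not a function from this patch.

#include <sys/stat.h>

#include <cstdio>
#include <ctime>

// Touch a scratch file and stat it to capture the filesystem's notion of
// "now"; this is the value recorded as the outputs' mtime in the build log.
static time_t CaptureBuildStart(const char* lock_path) {
  if (FILE* f = std::fopen(lock_path, "wb"))
    std::fclose(f);  // create/truncate the scratch file ("touch")
  struct stat st;
  if (stat(lock_path, &st) != 0)
    return 0;  // caller falls back to restatting the outputs after the build
  return st.st_mtime;
}

int main() {
  const time_t build_start = CaptureBuildStart(".ninja_lock");
  std::printf("would record output mtime %lld\n",
              static_cast<long long>(build_start));
  return 0;
}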
diff --git a/src/build.h b/src/build.h
index d697dfb..471f0b2 100644
--- a/src/build.h
+++ b/src/build.h
@@ -18,12 +18,11 @@
 #include <cstdio>
 #include <map>
 #include <memory>
-#include <queue>
 #include <string>
 #include <vector>
 
 #include "depfile_parser.h"
-#include "graph.h"  // XXX needed for DependencyScan; should rearrange.
+#include "graph.h"
 #include "exit_status.h"
 #include "util.h"  // int64_t
 
@@ -76,21 +75,13 @@
   /// Reset state.  Clears want and ready sets.
   void Reset();
 
+  /// After all targets have been added, prepares the ready queue for FindWork().
+  void PrepareQueue();
+
   /// Update the build plan to account for modifications made to the graph
   /// by information loaded from a dyndep file.
   bool DyndepsLoaded(DependencyScan* scan, const Node* node,
                      const DyndepFile& ddf, std::string* err);
-private:
-  bool RefreshDyndepDependents(DependencyScan* scan, const Node* node, std::string* err);
-  void UnmarkDependents(const Node* node, std::set<Node*>* dependents);
-  bool AddSubTarget(const Node* node, const Node* dependent, std::string* err,
-                    std::set<Edge*>* dyndep_walk);
-
-  /// Update plan with knowledge that the given node is up to date.
-  /// If the node is a dyndep binding on any of its dependents, this
-  /// loads dynamic dependencies from the node's path.
-  /// Returns 'false' if loading dyndep info fails and 'true' otherwise.
-  bool NodeFinished(Node* node, std::string* err);
 
   /// Enumerate possible steps we want for an edge.
   enum Want
@@ -105,6 +96,23 @@
     kWantToFinish
   };
 
+private:
+  void ComputeCriticalPath();
+  bool RefreshDyndepDependents(DependencyScan* scan, const Node* node, std::string* err);
+  void UnmarkDependents(const Node* node, std::set<Node*>* dependents);
+  bool AddSubTarget(const Node* node, const Node* dependent, std::string* err,
+                    std::set<Edge*>* dyndep_walk);
+
+  /// Add edges marked kWantToStart to the ready queue.
+  /// Must be called after ComputeCriticalPath() and before FindWork().
+  void ScheduleInitialEdges();
+
+  /// Update plan with knowledge that the given node is up to date.
+  /// If the node is a dyndep binding on any of its dependents, this
+  /// loads dynamic dependencies from the node's path.
+  /// Returns 'false' if loading dyndep info fails and 'true' otherwise.
+  bool NodeFinished(Node* node, std::string* err);
+
   void EdgeWanted(const Edge* edge);
   bool EdgeMaybeReady(std::map<Edge*, Want>::iterator want_e, std::string* err);
 
@@ -119,9 +127,11 @@
   /// we want for the edge.
   std::map<Edge*, Want> want_;
 
-  EdgeSet ready_;
+  EdgePriorityQueue ready_;
 
   Builder* builder_;
+  /// User-provided targets in build order; earlier ones have higher priority.
+  std::vector<const Node*> targets_;
 
   /// Total number of edges that have commands (not phony).
   int command_edges_;
@@ -135,7 +145,7 @@
 /// RealCommandRunner is an implementation that actually runs commands.
 struct CommandRunner {
   virtual ~CommandRunner() {}
-  virtual bool CanRunMore() const = 0;
+  virtual size_t CanRunMore() const = 0;
   virtual bool StartCommand(Edge* edge) = 0;
 
   /// The result of waiting for a command.
@@ -215,11 +225,7 @@
   State* state_;
   const BuildConfig& config_;
   Plan plan_;
-#if __cplusplus < 201703L
-  std::auto_ptr<CommandRunner> command_runner_;
-#else
-  std::unique_ptr<CommandRunner> command_runner_;  // auto_ptr was removed in C++17.
-#endif
+  std::unique_ptr<CommandRunner> command_runner_;
   Status* status_;
 
  private:
@@ -234,6 +240,7 @@
   /// Time the build started.
   int64_t start_time_millis_;
 
+  std::string lock_file_path_;
   DiskInterface* disk_interface_;
   DependencyScan scan_;
 
diff --git a/src/build_log.cc b/src/build_log.cc
index 4dcd6ce..792d1a3 100644
--- a/src/build_log.cc
+++ b/src/build_log.cc
@@ -53,8 +53,8 @@
 namespace {
 
 const char kFileSignature[] = "# ninja log v%d\n";
-const int kOldestSupportedVersion = 4;
-const int kCurrentVersion = 5;
+const int kOldestSupportedVersion = 6;
+const int kCurrentVersion = 6;
 
 // 64bit MurmurHash2, by Austin Appleby
 #if defined(_MSC_VER)
@@ -116,9 +116,9 @@
   : output(output) {}
 
 BuildLog::LogEntry::LogEntry(const string& output, uint64_t command_hash,
-  int start_time, int end_time, TimeStamp restat_mtime)
+  int start_time, int end_time, TimeStamp mtime)
   : output(output), command_hash(command_hash),
-    start_time(start_time), end_time(end_time), mtime(restat_mtime)
+    start_time(start_time), end_time(end_time), mtime(mtime)
 {}
 
 BuildLog::BuildLog()
@@ -279,14 +279,21 @@
     if (!log_version) {
       sscanf(line_start, kFileSignature, &log_version);
 
+      bool invalid_log_version = false;
       if (log_version < kOldestSupportedVersion) {
-        *err = ("build log version invalid, perhaps due to being too old; "
-                "starting over");
+        invalid_log_version = true;
+        *err = "build log version is too old; starting over";
+
+      } else if (log_version > kCurrentVersion) {
+        invalid_log_version = true;
+        *err = "build log version is too new; starting over";
+      }
+      if (invalid_log_version) {
         fclose(file);
         unlink(path.c_str());
-        // Don't report this as a failure.  An empty build log will cause
+        // Don't report this as a failure. A missing build log will cause
         // us to rebuild the outputs anyway.
-        return LOAD_SUCCESS;
+        return LOAD_NOT_FOUND;
       }
     }
 
@@ -303,7 +310,7 @@
     *end = 0;
 
     int start_time = 0, end_time = 0;
-    TimeStamp restat_mtime = 0;
+    TimeStamp mtime = 0;
 
     start_time = atoi(start);
     start = end + 1;
@@ -319,7 +326,7 @@
     if (!end)
       continue;
     *end = 0;
-    restat_mtime = strtoll(start, NULL, 10);
+    mtime = strtoll(start, NULL, 10);
     start = end + 1;
 
     end = (char*)memchr(start, kFieldSeparator, line_end - start);
@@ -343,15 +350,10 @@
 
     entry->start_time = start_time;
     entry->end_time = end_time;
-    entry->mtime = restat_mtime;
-    if (log_version >= 5) {
-      char c = *end; *end = '\0';
-      entry->command_hash = (uint64_t)strtoull(start, NULL, 16);
-      *end = c;
-    } else {
-      entry->command_hash = LogEntry::HashCommand(StringPiece(start,
-                                                              end - start));
-    }
+    entry->mtime = mtime;
+    char c = *end; *end = '\0';
+    entry->command_hash = (uint64_t)strtoull(start, NULL, 16);
+    *end = c;
   }
   fclose(file);
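As the tests further below illustrate, a v6 log entry is five tab-separated fields: start time, end time, recorded mtime, output path, and the command hash in hex (v6 always stores the hash, never the raw command text). The following small sketch prints one such entry; the hash value is a placeholder rather than a real MurmurHash2 result.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t command_hash = 0x1122334455667788ULL;  // placeholder hash
  const int64_t mtime = 456;  // TimeStamp recorded for the output
  std::printf("# ninja log v6\n");
  std::printf("%d\t%d\t%" PRId64 "\t%s\t%" PRIx64 "\n",
              /*start_time=*/123, /*end_time=*/456, mtime, "out.o",
              command_hash);
  return 0;
}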
 
diff --git a/src/build_log.h b/src/build_log.h
index 88551e3..dd72c4c 100644
--- a/src/build_log.h
+++ b/src/build_log.h
@@ -73,7 +73,7 @@
 
     explicit LogEntry(const std::string& output);
     LogEntry(const std::string& output, uint64_t command_hash,
-             int start_time, int end_time, TimeStamp restat_mtime);
+             int start_time, int end_time, TimeStamp mtime);
   };
 
   /// Lookup a previously-run command by its output path.
diff --git a/src/build_log_test.cc b/src/build_log_test.cc
index 3718299..12c2dc7 100644
--- a/src/build_log_test.cc
+++ b/src/build_log_test.cc
@@ -104,9 +104,11 @@
 
 TEST_F(BuildLogTest, DoubleEntry) {
   FILE* f = fopen(kTestFilename, "wb");
-  fprintf(f, "# ninja log v4\n");
-  fprintf(f, "0\t1\t2\tout\tcommand abc\n");
-  fprintf(f, "3\t4\t5\tout\tcommand def\n");
+  fprintf(f, "# ninja log v6\n");
+  fprintf(f, "0\t1\t2\tout\t%" PRIx64 "\n",
+      BuildLog::LogEntry::HashCommand("command abc"));
+  fprintf(f, "0\t1\t2\tout\t%" PRIx64 "\n",
+      BuildLog::LogEntry::HashCommand("command def"));
   fclose(f);
 
   string err;
@@ -133,9 +135,13 @@
     log1.RecordCommand(state_.edges_[1], 20, 25);
     log1.Close();
   }
-
+#ifdef __USE_LARGEFILE64
+  struct stat64 statbuf;
+  ASSERT_EQ(0, stat64(kTestFilename, &statbuf));
+#else
   struct stat statbuf;
   ASSERT_EQ(0, stat(kTestFilename, &statbuf));
+#endif
   ASSERT_GT(statbuf.st_size, 0);
 
   // For all possible truncations of the input file, assert that we don't
@@ -169,10 +175,11 @@
   ASSERT_NE(err.find("version"), string::npos);
 }
 
-TEST_F(BuildLogTest, SpacesInOutputV4) {
+TEST_F(BuildLogTest, SpacesInOutput) {
   FILE* f = fopen(kTestFilename, "wb");
-  fprintf(f, "# ninja log v4\n");
-  fprintf(f, "123\t456\t456\tout with space\tcommand\n");
+  fprintf(f, "# ninja log v6\n");
+  fprintf(f, "123\t456\t456\tout with space\t%" PRIx64 "\n",
+      BuildLog::LogEntry::HashCommand("command"));
   fclose(f);
 
   string err;
@@ -193,10 +200,12 @@
   // build log on Windows. This shouldn't crash, and the second version header
   // should be ignored.
   FILE* f = fopen(kTestFilename, "wb");
-  fprintf(f, "# ninja log v4\n");
-  fprintf(f, "123\t456\t456\tout\tcommand\n");
-  fprintf(f, "# ninja log v4\n");
-  fprintf(f, "456\t789\t789\tout2\tcommand2\n");
+  fprintf(f, "# ninja log v6\n");
+  fprintf(f, "123\t456\t456\tout\t%" PRIx64 "\n",
+      BuildLog::LogEntry::HashCommand("command"));
+  fprintf(f, "# ninja log v6\n");
+  fprintf(f, "456\t789\t789\tout2\t%" PRIx64 "\n",
+      BuildLog::LogEntry::HashCommand("command2"));
   fclose(f);
 
   string err;
@@ -243,7 +252,7 @@
 
 TEST_F(BuildLogTest, Restat) {
   FILE* f = fopen(kTestFilename, "wb");
-  fprintf(f, "# ninja log v4\n"
+  fprintf(f, "# ninja log v6\n"
              "1\t2\t3\tout\tcommand\n");
   fclose(f);
   std::string err;
@@ -271,12 +280,13 @@
   // Ninja's build log buffer is currently 256kB. Lines longer than that are
   // silently ignored, but don't affect parsing of other lines.
   FILE* f = fopen(kTestFilename, "wb");
-  fprintf(f, "# ninja log v4\n");
+  fprintf(f, "# ninja log v6\n");
   fprintf(f, "123\t456\t456\tout\tcommand start");
   for (size_t i = 0; i < (512 << 10) / strlen(" more_command"); ++i)
     fputs(" more_command", f);
   fprintf(f, "\n");
-  fprintf(f, "456\t789\t789\tout2\tcommand2\n");
+  fprintf(f, "456\t789\t789\tout2\t%" PRIx64 "\n",
+      BuildLog::LogEntry::HashCommand("command2"));
   fclose(f);
 
   string err;
diff --git a/src/build_test.cc b/src/build_test.cc
index 4ef62b2..61b0b6a 100644
--- a/src/build_test.cc
+++ b/src/build_test.cc
@@ -15,6 +15,8 @@
 #include "build.h"
 
 #include <assert.h>
+#include <climits>
+#include <stdint.h>
 
 #include "build_log.h"
 #include "deps_log.h"
@@ -50,6 +52,14 @@
     sort(ret->begin(), ret->end(), CompareEdgesByOutput::cmp);
   }
 
+  void PrepareForTarget(const char* node, BuildLog *log=NULL) {
+    string err;
+    EXPECT_TRUE(plan_.AddTarget(GetNode(node), &err));
+    ASSERT_EQ("", err);
+    plan_.PrepareQueue();
+    ASSERT_TRUE(plan_.more_to_do());
+  }
+
   void TestPoolWithDepthOne(const char *test_case);
 };
 
@@ -59,10 +69,7 @@
 "build mid: cat in\n"));
   GetNode("mid")->MarkDirty();
   GetNode("out")->MarkDirty();
-  string err;
-  EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err));
-  ASSERT_EQ("", err);
-  ASSERT_TRUE(plan_.more_to_do());
+  PrepareForTarget("out");
 
   Edge* edge = plan_.FindWork();
   ASSERT_TRUE(edge);
@@ -71,6 +78,7 @@
 
   ASSERT_FALSE(plan_.FindWork());
 
+  string err;
   plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
   ASSERT_EQ("", err);
 
@@ -95,15 +103,12 @@
   GetNode("mid1")->MarkDirty();
   GetNode("mid2")->MarkDirty();
   GetNode("out")->MarkDirty();
-
-  string err;
-  EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err));
-  ASSERT_EQ("", err);
-  ASSERT_TRUE(plan_.more_to_do());
+  PrepareForTarget("out");
 
   Edge* edge;
   edge = plan_.FindWork();
   ASSERT_TRUE(edge);  // cat in
+  string err;
   plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
   ASSERT_EQ("", err);
 
@@ -128,14 +133,12 @@
   GetNode("b1")->MarkDirty();
   GetNode("b2")->MarkDirty();
   GetNode("out")->MarkDirty();
-  string err;
-  EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err));
-  ASSERT_EQ("", err);
-  ASSERT_TRUE(plan_.more_to_do());
+  PrepareForTarget("out");
 
   Edge* edge;
   edge = plan_.FindWork();
   ASSERT_TRUE(edge);  // cat in
+  string err;
   plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
   ASSERT_EQ("", err);
 
@@ -169,15 +172,12 @@
   GetNode("a1")->MarkDirty();
   GetNode("a2")->MarkDirty();
   GetNode("out")->MarkDirty();
-
-  string err;
-  EXPECT_TRUE(plan_.AddTarget(GetNode("out"), &err));
-  ASSERT_EQ("", err);
-  ASSERT_TRUE(plan_.more_to_do());
+  PrepareForTarget("out");
 
   Edge* edge;
   edge = plan_.FindWork();
   ASSERT_TRUE(edge);  // cat in
+  string err;
   plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
   ASSERT_EQ("", err);
 
@@ -209,6 +209,7 @@
   ASSERT_EQ("", err);
   EXPECT_TRUE(plan_.AddTarget(GetNode("out2"), &err));
   ASSERT_EQ("", err);
+  plan_.PrepareQueue();
   ASSERT_TRUE(plan_.more_to_do());
 
   Edge* edge = plan_.FindWork();
@@ -284,10 +285,7 @@
     GetNode("outb" + string(1, '1' + static_cast<char>(i)))->MarkDirty();
   }
   GetNode("allTheThings")->MarkDirty();
-
-  string err;
-  EXPECT_TRUE(plan_.AddTarget(GetNode("allTheThings"), &err));
-  ASSERT_EQ("", err);
+  PrepareForTarget("allTheThings");
 
   deque<Edge*> edges;
   FindWorkSorted(&edges, 5);
@@ -306,6 +304,7 @@
   ASSERT_EQ("outb3", edge->outputs_[0]->path());
 
   // finish out1
+  string err;
   plan_.EdgeFinished(edges.front(), Plan::kEdgeSucceeded, &err);
   ASSERT_EQ("", err);
   edges.pop_front();
@@ -363,10 +362,7 @@
   GetNode("bar.cpp.obj")->MarkDirty();
   GetNode("libfoo.a")->MarkDirty();
   GetNode("all")->MarkDirty();
-  string err;
-  EXPECT_TRUE(plan_.AddTarget(GetNode("all"), &err));
-  ASSERT_EQ("", err);
-  ASSERT_TRUE(plan_.more_to_do());
+  PrepareForTarget("all");
 
   Edge* edge = NULL;
 
@@ -375,6 +371,7 @@
 
   edge = initial_edges[1];  // Foo first
   ASSERT_EQ("foo.cpp", edge->outputs_[0]->path());
+  string err;
   plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err);
   ASSERT_EQ("", err);
 
@@ -439,6 +436,7 @@
   ASSERT_EQ("", err);
   EXPECT_TRUE(plan_.AddTarget(GetNode("out2"), &err));
   ASSERT_EQ("", err);
+  plan_.PrepareQueue();
   ASSERT_TRUE(plan_.more_to_do());
 
   Edge* edge = plan_.FindWork();
@@ -467,13 +465,63 @@
   ASSERT_EQ(0, edge);
 }
 
+TEST_F(PlanTest, PriorityWithoutBuildLog) {
+  // Without a build log, the critical time is equivalent to graph
+  // depth. Test with the following graph:
+  //   a2
+  //   |
+  //   a1  b1
+  //   |  |  |
+  //   a0 b0 c0
+  //    \ | /
+  //     out
+
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+    "rule r\n"
+    "  command = unused\n"
+    "build out: r a0 b0 c0\n"
+    "build a0: r a1\n"
+    "build a1: r a2\n"
+    "build b0: r b1\n"
+    "build c0: r b1\n"
+  ));
+  GetNode("a1")->MarkDirty();
+  GetNode("a0")->MarkDirty();
+  GetNode("b0")->MarkDirty();
+  GetNode("c0")->MarkDirty();
+  GetNode("out")->MarkDirty();
+  BuildLog log;
+  PrepareForTarget("out", &log);
+
+  EXPECT_EQ(GetNode("out")->in_edge()->critical_path_weight(), 1);
+  EXPECT_EQ(GetNode("a0")->in_edge()->critical_path_weight(), 2);
+  EXPECT_EQ(GetNode("b0")->in_edge()->critical_path_weight(), 2);
+  EXPECT_EQ(GetNode("c0")->in_edge()->critical_path_weight(), 2);
+  EXPECT_EQ(GetNode("a1")->in_edge()->critical_path_weight(), 3);
+
+  const int n_edges = 5;
+  const char *expected_order[n_edges] = {
+    "a1", "a0", "b0", "c0", "out"};
+  for (int i = 0; i < n_edges; ++i) {
+    Edge* edge = plan_.FindWork();
+    ASSERT_TRUE(edge != nullptr);
+    EXPECT_EQ(expected_order[i], edge->outputs_[0]->path());
+
+    std::string err;
+    ASSERT_TRUE(plan_.EdgeFinished(edge, Plan::kEdgeSucceeded, &err));
+    EXPECT_EQ(err, "");
+  }
+
+  EXPECT_FALSE(plan_.FindWork());
+}
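The expected_order assertions above depend on the ready queue handing out the edge with the largest critical-path weight first. Here is a self-contained sketch of that ordering rule, using toy types rather than ninja's actual Edge and EdgePriorityQueue definitions:

#include <cstdint>
#include <cstdio>
#include <queue>
#include <vector>

struct ToyEdge {
  const char* output;
  int64_t critical_path_weight;  // longer remaining chain => larger weight
};

struct WeightLess {
  bool operator()(const ToyEdge* a, const ToyEdge* b) const {
    // std::priority_queue pops the largest element, so "less" here means
    // "shorter critical path", i.e. less urgent.
    return a->critical_path_weight < b->critical_path_weight;
  }
};

int main() {
  ToyEdge a1{"a1", 3}, b0{"b0", 2}, out{"out", 1};
  std::priority_queue<ToyEdge*, std::vector<ToyEdge*>, WeightLess> ready;
  ready.push(&out);
  ready.push(&b0);
  ready.push(&a1);
  while (!ready.empty()) {
    std::printf("%s\n", ready.top()->output);  // prints a1, b0, out
    ready.pop();
  }
  return 0;
}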
+
 /// Fake implementation of CommandRunner, useful for tests.
 struct FakeCommandRunner : public CommandRunner {
   explicit FakeCommandRunner(VirtualFileSystem* fs) :
       max_active_edges_(1), fs_(fs) {}
 
   // CommandRunner impl
-  virtual bool CanRunMore() const;
+  virtual size_t CanRunMore() const;
   virtual bool StartCommand(Edge* edge);
   virtual bool WaitForCommand(Result* result);
   virtual vector<Edge*> GetActiveEdges();
@@ -574,8 +622,11 @@
   builder.command_runner_.release();
 }
 
-bool FakeCommandRunner::CanRunMore() const {
-  return active_edges_.size() < max_active_edges_;
+size_t FakeCommandRunner::CanRunMore() const {
+  if (active_edges_.size() < max_active_edges_)
+    return SIZE_MAX;
+
+  return 0;
 }
 
 bool FakeCommandRunner::StartCommand(Edge* edge) {
@@ -611,6 +662,7 @@
       fs_->WriteFile(edge->outputs_[0]->path(), content);
   } else if (edge->rule().name() == "touch-implicit-dep-out") {
     string dep = edge->GetBinding("test_dependency");
+    fs_->Tick();
     fs_->Create(dep, "");
     fs_->Tick();
     for (vector<Node*>::iterator out = edge->outputs_.begin();
@@ -627,7 +679,12 @@
     fs_->Create(dep, "");
   } else if (edge->rule().name() == "generate-depfile") {
     string dep = edge->GetBinding("test_dependency");
+    bool touch_dep = edge->GetBindingBool("touch_dependency");
     string depfile = edge->GetUnescapedDepfile();
+    if (touch_dep) {
+      fs_->Tick();
+      fs_->Create(dep, "");
+    }
     string contents;
     for (vector<Node*>::iterator out = edge->outputs_.begin();
          out != edge->outputs_.end(); ++out) {
@@ -635,6 +692,20 @@
       fs_->Create((*out)->path(), "");
     }
     fs_->Create(depfile, contents);
+  } else if (edge->rule().name() == "long-cc") {
+    string dep = edge->GetBinding("test_dependency");
+    string depfile = edge->GetUnescapedDepfile();
+    string contents;
+    for (vector<Node*>::iterator out = edge->outputs_.begin();
+        out != edge->outputs_.end(); ++out) {
+      fs_->Tick();
+      fs_->Tick();
+      fs_->Tick();
+      fs_->Create((*out)->path(), "");
+      contents += (*out)->path() + ": " + dep + "\n";
+    }
+    if (!dep.empty() && !depfile.empty())
+      fs_->Create(depfile, contents);
   } else {
     printf("unknown command\n");
     return false;
@@ -690,6 +761,18 @@
   else
     result->status = ExitSuccess;
 
+  // This rule simulates an external process modifying files while the build command runs.
+  // See TestInputMtimeRaceCondition and TestInputMtimeRaceConditionWithDepFile.
+  // Note: the file is only modified the first and third time the rule runs in each test,
+  // so the test can verify that subsequent runs without the race have no work to do.
+  if (edge->rule().name() == "long-cc") {
+    string dep = edge->GetBinding("test_dependency");
+    if (fs_->now_ == 4)
+      fs_->files_[dep].mtime = 3;
+    if (fs_->now_ == 10)
+      fs_->files_[dep].mtime = 9;
+  }
+
   // Provide a way for test cases to verify when an edge finishes that
   // some other edge is still active.  This is useful for test cases
   // covering behavior involving multiple active edges.
@@ -954,9 +1037,19 @@
   ASSERT_EQ(1u, fs_.files_read_.size());
   EXPECT_EQ("foo.o.d", fs_.files_read_[0]);
 
-  // Expect three new edges: one generating foo.o, and two more from
-  // loading the depfile.
-  ASSERT_EQ(orig_edges + 3, (int)state_.edges_.size());
+  // Expect one new edge generating foo.o. Loading the depfile should have
+  // added nodes, but not phony edges to the graph.
+  ASSERT_EQ(orig_edges + 1, (int)state_.edges_.size());
+
+  // Verify that nodes for blah.h and bar.h were added and that they
+  // are marked as generated by a dep loader.
+  ASSERT_FALSE(state_.LookupNode("foo.o")->generated_by_dep_loader());
+  ASSERT_FALSE(state_.LookupNode("foo.c")->generated_by_dep_loader());
+  ASSERT_TRUE(state_.LookupNode("blah.h"));
+  ASSERT_TRUE(state_.LookupNode("blah.h")->generated_by_dep_loader());
+  ASSERT_TRUE(state_.LookupNode("bar.h"));
+  ASSERT_TRUE(state_.LookupNode("bar.h")->generated_by_dep_loader());
+
   // Expect our edge to now have three inputs: foo.c and two headers.
   ASSERT_EQ(3u, edge->inputs_.size());
 
@@ -1122,7 +1215,6 @@
   ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
 "rule cc\n  command = cc $in\n  depfile = $out.d\n"
 "build gen/stuff\\things/foo.o: cc x\\y/z\\foo.c\n"));
-  Edge* edge = state_.edges_.back();
 
   fs_.Create("x/y/z/foo.c", "");
   GetNode("bar.h")->MarkDirty();  // Mark bar.h as missing.
@@ -1135,10 +1227,10 @@
   // The depfile path does not get Canonicalize as it seems unnecessary.
   EXPECT_EQ("gen/stuff\\things/foo.o.d", fs_.files_read_[0]);
 
-  // Expect three new edges: one generating foo.o, and two more from
-  // loading the depfile.
-  ASSERT_EQ(orig_edges + 3, (int)state_.edges_.size());
+  // Expect one new edge generating foo.o.
+  ASSERT_EQ(orig_edges + 1, (int)state_.edges_.size());
   // Expect our edge to now have three inputs: foo.c and two headers.
+  Edge* edge = state_.edges_.back();
   ASSERT_EQ(3u, edge->inputs_.size());
 
   // Expect the command line we generate to only use the original input, and
@@ -1471,7 +1563,7 @@
 TEST_F(BuildWithLogTest, ImplicitGeneratedOutOfDate2) {
   ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
 "rule touch-implicit-dep-out\n"
-"  command = touch $test_dependency ; sleep 1 ; touch $out\n"
+"  command = sleep 1 ; touch $test_dependency ; sleep 1 ; touch $out\n"
 "  generator = 1\n"
 "build out.imp: touch-implicit-dep-out | inimp inimp2\n"
 "  test_dependency = inimp\n"));
@@ -1497,6 +1589,29 @@
   EXPECT_TRUE(builder_.AddTarget("out.imp", &err));
   EXPECT_TRUE(builder_.AlreadyUpToDate());
   EXPECT_FALSE(GetNode("out.imp")->dirty());
+
+  command_runner_.commands_ran_.clear();
+  state_.Reset();
+  builder_.Cleanup();
+  builder_.plan_.Reset();
+
+  fs_.Tick();
+  fs_.Create("inimp", "");
+
+  EXPECT_TRUE(builder_.AddTarget("out.imp", &err));
+  EXPECT_FALSE(builder_.AlreadyUpToDate());
+
+  EXPECT_TRUE(builder_.Build(&err));
+  EXPECT_TRUE(builder_.AlreadyUpToDate());
+
+  command_runner_.commands_ran_.clear();
+  state_.Reset();
+  builder_.Cleanup();
+  builder_.plan_.Reset();
+
+  EXPECT_TRUE(builder_.AddTarget("out.imp", &err));
+  EXPECT_TRUE(builder_.AlreadyUpToDate());
+  EXPECT_FALSE(GetNode("out.imp")->dirty());
 }
 
 TEST_F(BuildWithLogTest, NotInLogButOnDisk) {
@@ -1800,6 +1915,52 @@
   ASSERT_EQ(restat_mtime, log_entry->mtime);
 }
 
+TEST_F(BuildWithLogTest, RestatInputChangesDueToRule) {
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule generate-depfile\n"
+"  command = sleep 1 ; touch $touch_dependency; touch $out ; echo \"$out: $test_dependency\" > $depfile\n"
+"build out1: generate-depfile || cat1\n"
+"  test_dependency = in2\n"
+"  touch_dependency = 1\n"
+"  restat = 1\n"
+"  depfile = out.d\n"));
+
+  // Perform the first build. out1 is a restat rule, so its recorded mtime in the build
+  // log should be the time the command completes, not the time the command started. One
+  // of out1's discovered dependencies will have a newer mtime than when out1 started
+  // running, due to its command touching the dependency itself.
+  string err;
+  EXPECT_TRUE(builder_.AddTarget("out1", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ("", err);
+  EXPECT_EQ(2u, command_runner_.commands_ran_.size());
+  EXPECT_EQ(2u, builder_.plan_.command_edge_count());
+  BuildLog::LogEntry* log_entry = build_log_.LookupByOutput("out1");
+  ASSERT_TRUE(NULL != log_entry);
+  ASSERT_EQ(2u, log_entry->mtime);
+
+  command_runner_.commands_ran_.clear();
+  state_.Reset();
+  builder_.Cleanup();
+  builder_.plan_.Reset();
+
+  fs_.Tick();
+  fs_.Create("in1", "");
+
+  // Touching a dependency of an order-only dependency of out1 should not cause out1 to
+  // rebuild. If out1 were not a restat rule, then it would rebuild here because its
+  // recorded mtime would be older than the mtime of its most recent input (in2).
+  EXPECT_TRUE(builder_.AddTarget("out1", &err));
+  ASSERT_EQ("", err);
+  EXPECT_TRUE(!state_.GetNode("out1", 0)->dirty());
+  EXPECT_TRUE(builder_.Build(&err));
+  ASSERT_EQ("", err);
+  EXPECT_EQ(1u, command_runner_.commands_ran_.size());
+  EXPECT_EQ(1u, builder_.plan_.command_edge_count());
+}
+
 TEST_F(BuildWithLogTest, GeneratedPlainDepfileMtime) {
   ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
 "rule generate-depfile\n"
@@ -1904,10 +2065,11 @@
   EXPECT_TRUE(builder_.Build(&err));
   ASSERT_EQ(3u, command_runner_.commands_ran_.size());
 
-  // The RSP files were created
-  ASSERT_EQ(files_created + 2, fs_.files_created_.size());
+  // The RSP files and temp file to acquire output mtimes were created
+  ASSERT_EQ(files_created + 3, fs_.files_created_.size());
   ASSERT_EQ(1u, fs_.files_created_.count("out 2.rsp"));
   ASSERT_EQ(1u, fs_.files_created_.count("out 3.rsp"));
+  ASSERT_EQ(1u, fs_.files_created_.count(".ninja_lock"));
 
   // The RSP files were removed
   ASSERT_EQ(files_removed + 2, fs_.files_removed_.size());
@@ -1941,9 +2103,10 @@
   ASSERT_EQ("subcommand failed", err);
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 
-  // The RSP file was created
-  ASSERT_EQ(files_created + 1, fs_.files_created_.size());
+  // The RSP file and temp file to acquire output mtimes were created
+  ASSERT_EQ(files_created + 2, fs_.files_created_.size());
   ASSERT_EQ(1u, fs_.files_created_.count("out.rsp"));
+  ASSERT_EQ(1u, fs_.files_created_.count(".ninja_lock"));
 
   // The RSP file was NOT removed
   ASSERT_EQ(files_removed, fs_.files_removed_.size());
@@ -2094,11 +2257,28 @@
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 }
 
-TEST_F(BuildTest, StatusFormatElapsed) {
+TEST_F(BuildTest, StatusFormatElapsed_e) {
   status_.BuildStarted();
   // Before any task is done, the elapsed time must be zero.
-  EXPECT_EQ("[%/e0.000]",
-            status_.FormatProgressStatus("[%%/e%e]", 0));
+  EXPECT_EQ("[%/e0.000]", status_.FormatProgressStatus("[%%/e%e]", 0));
+}
+
+TEST_F(BuildTest, StatusFormatElapsed_w) {
+  status_.BuildStarted();
+  // Before any task is done, the elapsed time must be zero.
+  EXPECT_EQ("[%/e00:00]", status_.FormatProgressStatus("[%%/e%w]", 0));
+}
+
+TEST_F(BuildTest, StatusFormatETA) {
+  status_.BuildStarted();
+  // Before any task is done, the ETA time must be unknown.
+  EXPECT_EQ("[%/E?]", status_.FormatProgressStatus("[%%/E%E]", 0));
+}
+
+TEST_F(BuildTest, StatusFormatTimeProgress) {
+  status_.BuildStarted();
+  // Before any task is done, the percentage of elapsed time must be zero.
+  EXPECT_EQ("[%/p  0%]", status_.FormatProgressStatus("[%%/p%p]", 0));
 }
 
 TEST_F(BuildTest, StatusFormatReplacePlaceholder) {
@@ -2125,8 +2305,8 @@
 }
 
 struct BuildWithQueryDepsLogTest : public BuildTest {
-  BuildWithQueryDepsLogTest() : BuildTest(&log_) {
-  }
+  BuildWithQueryDepsLogTest()
+      : BuildTest(&log_), deps_log_file_("ninja_deps") {}
 
   ~BuildWithQueryDepsLogTest() {
     log_.Close();
@@ -2138,12 +2318,13 @@
     temp_dir_.CreateAndEnter("BuildWithQueryDepsLogTest");
 
     std::string err;
-    ASSERT_TRUE(log_.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(log_.OpenForWrite(deps_log_file_.path(), &err));
     ASSERT_EQ("", err);
   }
 
   ScopedTempDir temp_dir_;
 
+  ScopedFilePath deps_log_file_;
   DepsLog log_;
 };
 
@@ -2337,7 +2518,8 @@
 /// builder_ it sets up, because we want pristine objects for
 /// each build.
 struct BuildWithDepsLogTest : public BuildTest {
-  BuildWithDepsLogTest() {}
+  BuildWithDepsLogTest()
+      : build_log_file_("build_log"), deps_log_file_("ninja_deps") {}
 
   virtual void SetUp() {
     BuildTest::SetUp();
@@ -2350,6 +2532,8 @@
   }
 
   ScopedTempDir temp_dir_;
+  ScopedFilePath build_log_file_;
+  ScopedFilePath deps_log_file_;
 
   /// Shadow parent class builder_ so we don't accidentally use it.
   void* builder_;
@@ -2363,6 +2547,7 @@
       "build out: cat in1\n"
       "  deps = gcc\n"
       "  depfile = in1.d\n";
+
   {
     State state;
     ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
@@ -2370,7 +2555,7 @@
 
     // Run the build once, everything should be ok.
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
     ASSERT_EQ("", err);
 
     Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0);
@@ -2400,8 +2585,8 @@
 
     // Run the build again.
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
 
     Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0);
     builder.command_runner_.reset(&command_runner_);
@@ -2441,7 +2626,7 @@
 
     // Run the build once, everything should be ok.
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
     ASSERT_EQ("", err);
 
     Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0);
@@ -2470,8 +2655,8 @@
     ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
 
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
 
     Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0);
     builder.command_runner_.reset(&command_runner_);
@@ -2522,6 +2707,210 @@
   builder.command_runner_.release();
 }
 
+TEST_F(BuildWithDepsLogTest, TestInputMtimeRaceCondition) {
+  string err;
+  const char* manifest =
+      "rule long-cc\n"
+      "  command = long-cc\n"
+      "build out: long-cc in1\n"
+      "  test_dependency = in1\n";
+
+  State state;
+  ASSERT_NO_FATAL_FAILURE(AddCatRule(&state));
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+  BuildLog build_log;
+  ASSERT_TRUE(build_log.Load(build_log_file_.path(), &err));
+  ASSERT_TRUE(build_log.OpenForWrite(build_log_file_.path(), *this, &err));
+
+  DepsLog deps_log;
+  ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err));
+  ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
+
+  BuildLog::LogEntry* log_entry = NULL;
+  {
+    Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+    builder.command_runner_.reset(&command_runner_);
+    command_runner_.commands_ran_.clear();
+
+    // Run the build, out gets built, dep file is created
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+    EXPECT_TRUE(builder.Build(&err));
+    ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+    // See that an entry in the logfile is created. The input_mtime is 1 since that was
+    // the mtime of in1 when the command was started.
+    log_entry = build_log.LookupByOutput("out");
+    ASSERT_TRUE(NULL != log_entry);
+    ASSERT_EQ(1u, log_entry->mtime);
+
+    builder.command_runner_.release();
+  }
+
+  {
+    Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+    builder.command_runner_.reset(&command_runner_);
+    command_runner_.commands_ran_.clear();
+
+    // Trigger the build again - "out" should rebuild despite having a newer mtime than
+    // "in1", since "in1" was touched during the build of out (simulated by changing its
+    // mtime in the test builder's WaitForCommand(), which runs before FinishCommand()).
+    command_runner_.commands_ran_.clear();
+    state.Reset();
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+    EXPECT_TRUE(builder.Build(&err));
+    ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+    // Check that the logfile entry is still correct
+    log_entry = build_log.LookupByOutput("out");
+    ASSERT_TRUE(NULL != log_entry);
+    ASSERT_TRUE(fs_.files_["in1"].mtime < log_entry->mtime);
+    builder.command_runner_.release();
+  }
+
+  {
+    Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+    builder.command_runner_.reset(&command_runner_);
+    command_runner_.commands_ran_.clear();
+
+    // And a subsequent run should not have any work to do
+    command_runner_.commands_ran_.clear();
+    state.Reset();
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+    EXPECT_TRUE(builder.AlreadyUpToDate());
+
+    builder.command_runner_.release();
+  }
+}
+
+TEST_F(BuildWithDepsLogTest, TestInputMtimeRaceConditionWithDepFile) {
+  string err;
+  const char* manifest =
+      "rule long-cc\n"
+      "  command = long-cc\n"
+      "build out: long-cc\n"
+      "  deps = gcc\n"
+      "  depfile = out.d\n"
+      "  test_dependency = header.h\n";
+
+  fs_.Create("header.h", "");
+
+  State state;
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
+
+  BuildLog build_log;
+  ASSERT_TRUE(build_log.Load(build_log_file_.path(), &err));
+  ASSERT_TRUE(build_log.OpenForWrite(build_log_file_.path(), *this, &err));
+
+  DepsLog deps_log;
+  ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err));
+  ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
+
+  {
+    Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+    builder.command_runner_.reset(&command_runner_);
+
+    // Run the build, out gets built, dep file is created
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+    EXPECT_TRUE(builder.Build(&err));
+    ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+    // See that an entry in the logfile is created. The mtime is 1 due to the command
+    // starting when the file system's mtime was 1.
+    BuildLog::LogEntry* log_entry = build_log.LookupByOutput("out");
+    ASSERT_TRUE(NULL != log_entry);
+    ASSERT_EQ(1u, log_entry->mtime);
+
+    builder.command_runner_.release();
+  }
+
+  {
+    // Trigger the build again - "out" will rebuild since its newest input mtime (header.h)
+    // is newer than the recorded mtime of out in the build log
+    Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+    builder.command_runner_.reset(&command_runner_);
+    command_runner_.commands_ran_.clear();
+
+    state.Reset();
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+    EXPECT_TRUE(builder.Build(&err));
+    ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+    builder.command_runner_.release();
+  }
+
+  {
+    // Trigger the build again - "out" won't rebuild since the file wasn't updated during
+    // the previous build
+    Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+    builder.command_runner_.reset(&command_runner_);
+    command_runner_.commands_ran_.clear();
+
+    state.Reset();
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+    ASSERT_TRUE(builder.AlreadyUpToDate());
+
+    builder.command_runner_.release();
+  }
+
+  // touch the header to trigger a rebuild
+  fs_.Create("header.h", "");
+  ASSERT_EQ(fs_.now_, 7);
+
+  {
+    // Rebuild. This time, long-cc will cause header.h to be updated while the build is
+    // in progress
+    Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+    builder.command_runner_.reset(&command_runner_);
+    command_runner_.commands_ran_.clear();
+
+    state.Reset();
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+    EXPECT_TRUE(builder.Build(&err));
+    ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+    builder.command_runner_.release();
+  }
+
+  {
+    // Rebuild. Because header.h is now in the deplog for out, it should be detectable as
+    // a change-while-in-progress and should cause a rebuild of out.
+    Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+    builder.command_runner_.reset(&command_runner_);
+    command_runner_.commands_ran_.clear();
+
+    state.Reset();
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+    EXPECT_TRUE(builder.Build(&err));
+    ASSERT_EQ(1u, command_runner_.commands_ran_.size());
+
+    builder.command_runner_.release();
+  }
+
+  {
+    // This time, the header.h file was not updated during the build, so the target should
+    // not be considered dirty.
+    Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
+    builder.command_runner_.reset(&command_runner_);
+    command_runner_.commands_ran_.clear();
+
+    state.Reset();
+    EXPECT_TRUE(builder.AddTarget("out", &err));
+    ASSERT_EQ("", err);
+    EXPECT_TRUE(builder.AlreadyUpToDate());
+
+    builder.command_runner_.release();
+  }
+}
+
 /// Check that a restat rule generating a header cancels compilations correctly.
 TEST_F(BuildTest, RestatDepfileDependency) {
   ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
@@ -2564,7 +2953,7 @@
 
     // Run the build once, everything should be ok.
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
     ASSERT_EQ("", err);
 
     Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0);
@@ -2590,8 +2979,8 @@
 
     // Run the build again.
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
 
     Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0);
     builder.command_runner_.reset(&command_runner_);
@@ -2623,7 +3012,7 @@
 
     // Run the build once, everything should be ok.
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
     ASSERT_EQ("", err);
 
     Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0);
@@ -2643,8 +3032,8 @@
     ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
 
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
     ASSERT_EQ("", err);
 
     Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0);
@@ -2656,9 +3045,9 @@
     EXPECT_TRUE(builder.AddTarget("fo o.o", &err));
     ASSERT_EQ("", err);
 
-    // Expect three new edges: one generating fo o.o, and two more from
-    // loading the depfile.
-    ASSERT_EQ(3u, state.edges_.size());
+    // Expect one new edge generating fo o.o; loading the depfile should
+    // not generate new edges.
+    ASSERT_EQ(1u, state.edges_.size());
     // Expect our edge to now have three inputs: foo.c and two headers.
     ASSERT_EQ(3u, edge->inputs_.size());
 
@@ -2694,7 +3083,7 @@
     ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
 
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
     ASSERT_EQ("", err);
 
     Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
@@ -2717,8 +3106,8 @@
     ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
 
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
     ASSERT_EQ("", err);
 
     Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
@@ -2740,8 +3129,8 @@
     ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
 
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
     ASSERT_EQ("", err);
 
     Builder builder(&state, config_, &build_log, &deps_log, &fs_, &status_, 0);
@@ -2769,7 +3158,7 @@
 
     // Run the build once, everything should be ok.
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
     ASSERT_EQ("", err);
 
     Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0);
@@ -2791,23 +3180,21 @@
     ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
 
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
     ASSERT_EQ("", err);
 
     Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0);
     builder.command_runner_.reset(&command_runner_);
 
-    Edge* edge = state.edges_.back();
-
     state.GetNode("bar.h", 0)->MarkDirty();  // Mark bar.h as missing.
     EXPECT_TRUE(builder.AddTarget("a/b/c/d/e/fo o.o", &err));
     ASSERT_EQ("", err);
 
-    // Expect three new edges: one generating fo o.o, and two more from
-    // loading the depfile.
-    ASSERT_EQ(3u, state.edges_.size());
+    // Expect one new edge generating fo o.o.
+    ASSERT_EQ(1u, state.edges_.size());
     // Expect our edge to now have three inputs: foo.c and two headers.
+    Edge* edge = state.edges_.back();
     ASSERT_EQ(3u, edge->inputs_.size());
 
     // Expect the command line we generate to only use the original input.
@@ -2862,11 +3249,13 @@
   fs_.Create("out.d", "out: header.h");
   fs_.Create("header.h", "");
 
-  RebuildTarget("out", manifest, "build_log", "ninja_deps");
+  RebuildTarget("out", manifest, build_log_file_.c_str(),
+                deps_log_file_.c_str());
   ASSERT_EQ(2u, command_runner_.commands_ran_.size());
 
   // Sanity: this rebuild should be NOOP
-  RebuildTarget("out", manifest, "build_log", "ninja_deps");
+  RebuildTarget("out", manifest, build_log_file_.c_str(),
+                deps_log_file_.c_str());
   ASSERT_EQ(0u, command_runner_.commands_ran_.size());
 
   // Touch 'header.in', blank dependencies log (create a different one).
@@ -2875,12 +3264,14 @@
   fs_.Tick();
   fs_.Create("header.in", "");
 
+  ScopedFilePath deps2_file_("ninja_deps2");
+
   // (switch to a new blank deps_log "ninja_deps2")
-  RebuildTarget("out", manifest, "build_log", "ninja_deps2");
+  RebuildTarget("out", manifest, build_log_file_.c_str(), deps2_file_.c_str());
   ASSERT_EQ(2u, command_runner_.commands_ran_.size());
 
   // Sanity: this build should be NOOP
-  RebuildTarget("out", manifest, "build_log", "ninja_deps2");
+  RebuildTarget("out", manifest, build_log_file_.c_str(), deps2_file_.c_str());
   ASSERT_EQ(0u, command_runner_.commands_ran_.size());
 
   // Check that invalidating deps by target timestamp also works here
@@ -2888,11 +3279,11 @@
   fs_.Tick();
   fs_.Create("header.in", "");
   fs_.Create("out", "");
-  RebuildTarget("out", manifest, "build_log", "ninja_deps2");
+  RebuildTarget("out", manifest, build_log_file_.c_str(), deps2_file_.c_str());
   ASSERT_EQ(2u, command_runner_.commands_ran_.size());
 
   // And this build should be NOOP again
-  RebuildTarget("out", manifest, "build_log", "ninja_deps2");
+  RebuildTarget("out", manifest, build_log_file_.c_str(), deps2_file_.c_str());
   ASSERT_EQ(0u, command_runner_.commands_ran_.size());
 }
 
@@ -2909,7 +3300,10 @@
   fs_.Create("header.h", "");
   fs_.Create("foo.o.d", "bar.o.d: header.h\n");
 
-  RebuildTarget("foo.o", manifest, "build_log", "ninja_deps");
+  ScopedFilePath build_log("build_log");
+  ScopedFilePath deps_file("ninja_deps");
+
+  RebuildTarget("foo.o", manifest, build_log.c_str(), deps_file.c_str());
   ASSERT_EQ(1u, command_runner_.commands_ran_.size());
 }
 
@@ -3042,9 +3436,10 @@
   ASSERT_EQ(2u, fs_.files_read_.size());
   EXPECT_EQ("dd-in", fs_.files_read_[0]);
   EXPECT_EQ("dd", fs_.files_read_[1]);
-  ASSERT_EQ(2u + files_created, fs_.files_created_.size());
+  ASSERT_EQ(3u + files_created, fs_.files_created_.size());
   EXPECT_EQ(1u, fs_.files_created_.count("dd"));
   EXPECT_EQ(1u, fs_.files_created_.count("out"));
+  EXPECT_EQ(1u, fs_.files_created_.count(".ninja_lock"));
 }
 
 TEST_F(BuildTest, DyndepBuildSyntaxError) {
@@ -3355,8 +3750,8 @@
   EXPECT_TRUE(builder_.AddTarget("out", &err));
   ASSERT_EQ("", err);
 
-  // Loading the depfile gave tmp.imp a phony input edge.
-  ASSERT_TRUE(GetNode("tmp.imp")->in_edge()->is_phony());
+  // Loading the depfile did not give tmp.imp a phony input edge.
+  ASSERT_FALSE(GetNode("tmp.imp")->in_edge());
 
   EXPECT_TRUE(builder_.Build(&err));
   EXPECT_EQ("", err);
@@ -3865,7 +4260,7 @@
     ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
 
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
     ASSERT_EQ("", err);
 
     Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0);
@@ -3900,8 +4295,8 @@
     ASSERT_NO_FATAL_FAILURE(AssertParse(&state, manifest));
 
     DepsLog deps_log;
-    ASSERT_TRUE(deps_log.Load("ninja_deps", &state, &err));
-    ASSERT_TRUE(deps_log.OpenForWrite("ninja_deps", &err));
+    ASSERT_TRUE(deps_log.Load(deps_log_file_.path(), &state, &err));
+    ASSERT_TRUE(deps_log.OpenForWrite(deps_log_file_.path(), &err));
     ASSERT_EQ("", err);
 
     Builder builder(&state, config_, NULL, &deps_log, &fs_, &status_, 0);
diff --git a/src/clean.cc b/src/clean.cc
index 575bf6b..ceffe64 100644
--- a/src/clean.cc
+++ b/src/clean.cc
@@ -127,6 +127,7 @@
 int Cleaner::CleanDead(const BuildLog::Entries& entries) {
   Reset();
   PrintHeader();
+  LoadDyndeps();
   for (BuildLog::Entries::const_iterator i = entries.begin(); i != entries.end(); ++i) {
     Node* n = state_->LookupNode(i->first);
     // Detecting stale outputs works as follows:
@@ -292,7 +293,8 @@
   // Load dyndep files that exist, before they are cleaned.
   for (vector<Edge*>::iterator e = state_->edges_.begin();
        e != state_->edges_.end(); ++e) {
-    if (Node* dyndep = (*e)->dyndep_) {
+    Node* dyndep;
+    if ((dyndep = (*e)->dyndep_) && dyndep->dyndep_pending()) {
       // Capture and ignore errors loading the dyndep file.
       // We clean as much of the graph as we know.
       std::string err;
diff --git a/src/depfile_parser.cc b/src/depfile_parser.cc
index 98fba2e..7ce7290 100644
--- a/src/depfile_parser.cc
+++ b/src/depfile_parser.cc
@@ -54,6 +54,7 @@
   bool have_target = false;
   bool parsing_targets = true;
   bool poisoned_input = false;
+  bool is_empty = true;
   while (in < end) {
     bool have_newline = false;
     // out: current output point (typically same as in, but can fall behind
@@ -335,6 +336,7 @@
     }
 
     if (len > 0) {
+      is_empty = false;
       StringPiece piece = StringPiece(filename, len);
       // If we've seen this as an input before, skip it.
       std::vector<StringPiece>::iterator pos = std::find(ins_.begin(), ins_.end(), piece);
@@ -363,7 +365,7 @@
       poisoned_input = false;
     }
   }
-  if (!have_target) {
+  if (!have_target && !is_empty) {
     *err = "expected ':' in depfile";
     return false;
   }
diff --git a/src/depfile_parser.in.cc b/src/depfile_parser.in.cc
index 75ba982..4b5f5fe 100644
--- a/src/depfile_parser.in.cc
+++ b/src/depfile_parser.in.cc
@@ -53,6 +53,7 @@
   bool have_target = false;
   bool parsing_targets = true;
   bool poisoned_input = false;
+  bool is_empty = true;
   while (in < end) {
     bool have_newline = false;
     // out: current output point (typically same as in, but can fall behind
@@ -171,6 +172,7 @@
     }
 
     if (len > 0) {
+      is_empty = false;
       StringPiece piece = StringPiece(filename, len);
       // If we've seen this as an input before, skip it.
       std::vector<StringPiece>::iterator pos = std::find(ins_.begin(), ins_.end(), piece);
@@ -199,7 +201,7 @@
       poisoned_input = false;
     }
   }
-  if (!have_target) {
+  if (!have_target && !is_empty) {
     *err = "expected ':' in depfile";
     return false;
   }
diff --git a/src/depfile_parser_test.cc b/src/depfile_parser_test.cc
index 8886258..947ae76 100644
--- a/src/depfile_parser_test.cc
+++ b/src/depfile_parser_test.cc
@@ -378,3 +378,24 @@
                      "z:\n", &err));
   ASSERT_EQ("inputs may not also have inputs", err);
 }
+
+TEST_F(DepfileParserTest, EmptyFile) {
+  std::string err;
+  EXPECT_TRUE(Parse("", &err));
+  ASSERT_EQ(0u, parser_.outs_.size());
+  ASSERT_EQ(0u, parser_.ins_.size());
+}
+
+TEST_F(DepfileParserTest, EmptyLines) {
+  std::string err;
+  EXPECT_TRUE(Parse("\n\n", &err));
+  ASSERT_EQ(0u, parser_.outs_.size());
+  ASSERT_EQ(0u, parser_.ins_.size());
+}
+
+TEST_F(DepfileParserTest, MissingColon) {
+  // The file is not empty but is missing a colon separator.
+  std::string err;
+  EXPECT_FALSE(Parse("foo.o foo.c\n", &err));
+  EXPECT_EQ("expected ':' in depfile", err);
+}
diff --git a/src/deps_log.cc b/src/deps_log.cc
index 7e48b38..e32a7a9 100644
--- a/src/deps_log.cc
+++ b/src/deps_log.cc
@@ -361,7 +361,7 @@
   return true;
 }
 
-bool DepsLog::IsDepsEntryLiveFor(Node* node) {
+bool DepsLog::IsDepsEntryLiveFor(const Node* node) {
   // Skip entries that don't have in-edges or whose edges don't have a
   // "deps" attribute. They were in the deps log from previous builds, but
   // the files they were for were removed from the build and their deps
diff --git a/src/deps_log.h b/src/deps_log.h
index 09cc41c..2a1b188 100644
--- a/src/deps_log.h
+++ b/src/deps_log.h
@@ -97,7 +97,7 @@
   /// past but are no longer part of the manifest.  This function returns if
   /// this is the case for a given node.  This function is slow, don't call
   /// it from code that runs on every build.
-  bool IsDepsEntryLiveFor(Node* node);
+  static bool IsDepsEntryLiveFor(const Node* node);
 
   /// Used for tests.
   const std::vector<Node*>& nodes() const { return nodes_; }
diff --git a/src/deps_log_test.cc b/src/deps_log_test.cc
index 13fcc78..cb1c925 100644
--- a/src/deps_log_test.cc
+++ b/src/deps_log_test.cc
@@ -138,9 +138,13 @@
     deps.push_back(state.GetNode("bar.h", 0));
     log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
     log.Close();
-
+#ifdef __USE_LARGEFILE64
+    struct stat64 st;
+    ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
     struct stat st;
     ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
     file_size = (int)st.st_size;
     ASSERT_GT(file_size, 0);
   }
@@ -160,9 +164,13 @@
     deps.push_back(state.GetNode("bar.h", 0));
     log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
     log.Close();
-
+#ifdef __USE_LARGEFILE64
+    struct stat64 st;
+    ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
     struct stat st;
     ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
     int file_size_2 = (int)st.st_size;
     ASSERT_EQ(file_size, file_size_2);
   }
@@ -198,9 +206,13 @@
     log.RecordDeps(state.GetNode("other_out.o", 0), 1, deps);
 
     log.Close();
-
+#ifdef __USE_LARGEFILE64
+    struct stat64 st;
+    ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
     struct stat st;
     ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
     file_size = (int)st.st_size;
     ASSERT_GT(file_size, 0);
   }
@@ -222,8 +234,13 @@
     log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
     log.Close();
 
+#ifdef __USE_LARGEFILE64
+    struct stat64 st;
+    ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
     struct stat st;
     ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
     file_size_2 = (int)st.st_size;
     // The file should grow to record the new deps.
     ASSERT_GT(file_size_2, file_size);
@@ -273,8 +290,13 @@
     ASSERT_EQ(other_out, log.nodes()[other_out->id()]);
 
     // The file should have shrunk a bit for the smaller deps.
+#ifdef __USE_LARGEFILE64
+    struct stat64 st;
+    ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
     struct stat st;
     ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
     file_size_3 = (int)st.st_size;
     ASSERT_LT(file_size_3, file_size_2);
   }
@@ -317,8 +339,13 @@
     ASSERT_EQ(-1, state.LookupNode("baz.h")->id());
 
     // The file should have shrunk more.
+#ifdef __USE_LARGEFILE64
+    struct stat64 st;
+    ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
     struct stat st;
     ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
     int file_size_4 = (int)st.st_size;
     ASSERT_LT(file_size_4, file_size_3);
   }
@@ -374,8 +401,13 @@
   }
 
   // Get the file size.
+#ifdef __USE_LARGEFILE64
+  struct stat64 st;
+  ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
   struct stat st;
   ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
 
   // Try reloading at truncated sizes.
   // Track how many nodes/deps were found; they should decrease with
@@ -434,8 +466,13 @@
 
   // Shorten the file, corrupting the last record.
   {
+#ifdef __USE_LARGEFILE64
+    struct stat64 st;
+    ASSERT_EQ(0, stat64(kTestFilename, &st));
+#else
     struct stat st;
     ASSERT_EQ(0, stat(kTestFilename, &st));
+#endif
     string err;
     ASSERT_TRUE(Truncate(kTestFilename, st.st_size - 2, &err));
   }
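
The repeated #ifdef __USE_LARGEFILE64 blocks above all take the same shape; as a sketch only (the patch keeps them inline), the pattern could be expressed as one helper that returns the file size through whichever stat variant glibc exposes:

    #include <sys/stat.h>
    #include <sys/types.h>

    // Sketch: size of |path| in bytes, or -1 on error, using stat64 when
    // glibc defines __USE_LARGEFILE64 and plain stat otherwise.
    static long long FileSizeOf(const char* path) {
    #ifdef __USE_LARGEFILE64
      struct stat64 st;
      if (stat64(path, &st) != 0)
        return -1;
    #else
      struct stat st;
      if (stat(path, &st) != 0)
        return -1;
    #endif
      return static_cast<long long>(st.st_size);
    }
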
diff --git a/src/disk_interface.cc b/src/disk_interface.cc
index e73d901..0f27e9d 100644
--- a/src/disk_interface.cc
+++ b/src/disk_interface.cc
@@ -23,9 +23,10 @@
 #include <sys/types.h>
 
 #ifdef _WIN32
-#include <sstream>
-#include <windows.h>
 #include <direct.h>  // _mkdir
+#include <windows.h>
+
+#include <sstream>
 #else
 #include <unistd.h>
 #endif
@@ -110,7 +111,8 @@
 
   if (find_handle == INVALID_HANDLE_VALUE) {
     DWORD win_err = GetLastError();
-    if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND)
+    if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND ||
+        win_err == ERROR_DIRECTORY)
       return true;
     *err = "FindFirstFileExA(" + dir + "): " + GetLastErrorString();
     return false;
@@ -156,13 +158,33 @@
 }
 
 // RealDiskInterface -----------------------------------------------------------
+RealDiskInterface::RealDiskInterface()
+#ifdef _WIN32
+: use_cache_(false), long_paths_enabled_(false) {
+  setlocale(LC_ALL, "");
+
+  // Probe ntdll.dll for RtlAreLongPathsEnabled, and call it if it exists.
+  HINSTANCE ntdll_lib = ::GetModuleHandleW(L"ntdll");
+  if (ntdll_lib) {
+    typedef BOOLEAN(WINAPI FunctionType)();
+    auto* func_ptr = reinterpret_cast<FunctionType*>(
+        ::GetProcAddress(ntdll_lib, "RtlAreLongPathsEnabled"));
+    if (func_ptr) {
+      long_paths_enabled_ = (*func_ptr)();
+    }
+  }
+}
+#else
+{}
+#endif
 
 TimeStamp RealDiskInterface::Stat(const string& path, string* err) const {
   METRIC_RECORD("node stat");
 #ifdef _WIN32
   // MSDN: "Naming Files, Paths, and Namespaces"
   // http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
-  if (!path.empty() && path[0] != '\\' && path.size() > MAX_PATH) {
+  if (!path.empty() && !AreLongPathsEnabled() && path[0] != '\\' &&
+      path.size() > MAX_PATH) {
     ostringstream err_stream;
     err_stream << "Stat(" << path << "): Filename longer than " << MAX_PATH
                << " characters";
@@ -195,8 +217,13 @@
   DirCache::iterator di = ci->second.find(base);
   return di != ci->second.end() ? di->second : 0;
 #else
+#ifdef __USE_LARGEFILE64
+  struct stat64 st;
+  if (stat64(path.c_str(), &st) < 0) {
+#else
   struct stat st;
   if (stat(path.c_str(), &st) < 0) {
+#endif
     if (errno == ENOENT || errno == ENOTDIR)
       return 0;
     *err = "stat(" + path + "): " + strerror(errno);
@@ -267,7 +294,7 @@
 
 int RealDiskInterface::RemoveFile(const string& path) {
 #ifdef _WIN32
-  DWORD attributes = GetFileAttributes(path.c_str());
+  DWORD attributes = GetFileAttributesA(path.c_str());
   if (attributes == INVALID_FILE_ATTRIBUTES) {
     DWORD win_err = GetLastError();
     if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) {
@@ -278,7 +305,7 @@
     // On Windows Ninja should behave the same:
     //   https://github.com/ninja-build/ninja/issues/1886
     // Skip error checking.  If this fails, accept whatever happens below.
-    SetFileAttributes(path.c_str(), attributes & ~FILE_ATTRIBUTE_READONLY);
+    SetFileAttributesA(path.c_str(), attributes & ~FILE_ATTRIBUTE_READONLY);
   }
   if (attributes & FILE_ATTRIBUTE_DIRECTORY) {
     // remove() deletes both files and directories. On Windows we have to 
@@ -286,7 +313,7 @@
     // used on a directory)
     // This fixes the behavior of ninja -t clean in some cases
     // https://github.com/ninja-build/ninja/issues/828
-    if (!RemoveDirectory(path.c_str())) {
+    if (!RemoveDirectoryA(path.c_str())) {
       DWORD win_err = GetLastError();
       if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) {
         return 1;
@@ -296,7 +323,7 @@
       return -1;
     }
   } else {
-    if (!DeleteFile(path.c_str())) {
+    if (!DeleteFileA(path.c_str())) {
       DWORD win_err = GetLastError();
       if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND) {
         return 1;
@@ -327,3 +354,9 @@
     cache_.clear();
 #endif
 }
+
+#ifdef _WIN32
+bool RealDiskInterface::AreLongPathsEnabled(void) const {
+  return long_paths_enabled_;
+}
+#endif
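
RealDiskInterface now decides about long-path support once, at construction, by probing ntdll for RtlAreLongPathsEnabled instead of assuming a minimum Windows version. The probe-then-call pattern in isolation (Windows-only sketch, using only the export named above):

    #ifdef _WIN32
    #include <windows.h>

    // Sketch: returns true when the OS reports that paths longer than
    // MAX_PATH are allowed; false on older systems lacking the export.
    static bool ProbeLongPathSupport() {
      bool enabled = false;
      HINSTANCE ntdll = ::GetModuleHandleW(L"ntdll");
      if (ntdll) {
        typedef BOOLEAN(WINAPI FunctionType)();
        auto* fn = reinterpret_cast<FunctionType*>(
            ::GetProcAddress(ntdll, "RtlAreLongPathsEnabled"));
        if (fn)
          enabled = (*fn)() != FALSE;
      }
      return enabled;
    }
    #endif
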
diff --git a/src/disk_interface.h b/src/disk_interface.h
index bc29ab7..74200b8 100644
--- a/src/disk_interface.h
+++ b/src/disk_interface.h
@@ -69,11 +69,7 @@
 
 /// Implementation of DiskInterface that actually hits the disk.
 struct RealDiskInterface : public DiskInterface {
-  RealDiskInterface()
-#ifdef _WIN32
-                      : use_cache_(false)
-#endif
-                      {}
+  RealDiskInterface();
   virtual ~RealDiskInterface() {}
   virtual TimeStamp Stat(const std::string& path, std::string* err) const;
   virtual bool MakeDir(const std::string& path);
@@ -85,11 +81,19 @@
   /// Whether stat information can be cached.  Only has an effect on Windows.
   void AllowStatCache(bool allow);
 
+#ifdef _WIN32
+  /// Whether long paths are enabled.  Only has an effect on Windows.
+  bool AreLongPathsEnabled() const;
+#endif
+
  private:
 #ifdef _WIN32
   /// Whether stat information can be cached.
   bool use_cache_;
 
+  /// Whether long paths are enabled.
+  bool long_paths_enabled_;
+
   typedef std::map<std::string, TimeStamp> DirCache;
   // TODO: Neither a map nor a hashmap seems ideal here.  If the statcache
   // works out, come up with a better data structure.
diff --git a/src/disk_interface_test.cc b/src/disk_interface_test.cc
index 5e952ed..e8d869c 100644
--- a/src/disk_interface_test.cc
+++ b/src/disk_interface_test.cc
@@ -17,6 +17,7 @@
 #ifdef _WIN32
 #include <io.h>
 #include <windows.h>
+#include <direct.h>
 #endif
 
 #include "disk_interface.h"
@@ -65,6 +66,17 @@
   EXPECT_EQ("", err);
 }
 
+TEST_F(DiskInterfaceTest, StatMissingFileWithCache) {
+  disk_.AllowStatCache(true);
+  string err;
+
+  // On Windows, the errno for FindFirstFileExA, which is used when the stat
+  // cache is enabled, is different when the directory name is not a directory.
+  ASSERT_TRUE(Touch("notadir"));
+  EXPECT_EQ(0, disk_.Stat("notadir/nosuchfile", &err));
+  EXPECT_EQ("", err);
+}
+
 TEST_F(DiskInterfaceTest, StatBadPath) {
   string err;
 #ifdef _WIN32
@@ -85,6 +97,24 @@
   EXPECT_EQ("", err);
 }
 
+#ifdef _WIN32
+TEST_F(DiskInterfaceTest, StatExistingFileWithLongPath) {
+  string err;
+  char currentdir[32767];
+  _getcwd(currentdir, sizeof(currentdir));
+  const string filename = string(currentdir) +
+"\\filename_with_256_characters_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\
+xxxxxxxxxxxxxxxxxxxxx";
+  const string prefixed = "\\\\?\\" + filename;
+  ASSERT_TRUE(Touch(prefixed.c_str()));
+  EXPECT_GT(disk_.Stat(disk_.AreLongPathsEnabled() ?
+    filename : prefixed, &err), 1);
+  EXPECT_EQ("", err);
+}
+#endif
+
 TEST_F(DiskInterfaceTest, StatExistingDir) {
   string err;
   ASSERT_TRUE(disk_.MakeDir("subdir"));
@@ -198,7 +228,7 @@
   EXPECT_EQ(0, fclose(f));
 #ifdef _WIN32
   string path2 = "another\\with\\back\\\\slashes\\";
-  EXPECT_TRUE(disk_.MakeDirs(path2.c_str()));
+  EXPECT_TRUE(disk_.MakeDirs(path2));
   FILE* f2 = fopen((path2 + "a_file").c_str(), "w");
   EXPECT_TRUE(f2);
   EXPECT_EQ(0, fclose(f2));
diff --git a/src/dyndep.cc b/src/dyndep.cc
index dd4ed09..a0d699d 100644
--- a/src/dyndep.cc
+++ b/src/dyndep.cc
@@ -97,15 +97,10 @@
   for (std::vector<Node*>::const_iterator i =
            dyndeps->implicit_outputs_.begin();
        i != dyndeps->implicit_outputs_.end(); ++i) {
-    if (Edge* old_in_edge = (*i)->in_edge()) {
-      // This node already has an edge producing it.  Fail with an error
-      // unless the edge was generated by ImplicitDepLoader, in which
-      // case we can replace it with the now-known real producer.
-      if (!old_in_edge->generated_by_dep_loader_) {
-        *err = "multiple rules generate " + (*i)->path();
-        return false;
-      }
-      old_in_edge->outputs_.clear();
+    if ((*i)->in_edge()) {
+      // This node already has an edge producing it.
+      *err = "multiple rules generate " + (*i)->path();
+      return false;
     }
     (*i)->set_in_edge(edge);
   }
diff --git a/src/graph.cc b/src/graph.cc
index 43ba45a..31b109a 100644
--- a/src/graph.cc
+++ b/src/graph.cc
@@ -32,7 +32,6 @@
 using namespace std;
 
 bool Node::Stat(DiskInterface* disk_interface, string* err) {
-  METRIC_RECORD("node stat");
   mtime_ = disk_interface->Stat(path_, err);
   if (mtime_ == -1) {
     return false;
@@ -298,37 +297,34 @@
     return false;
   }
 
-  BuildLog::LogEntry* entry = 0;
-
   // Dirty if we're missing the output.
   if (!output->exists()) {
     EXPLAIN("output %s doesn't exist", output->path().c_str());
     return true;
   }
 
+  BuildLog::LogEntry* entry = 0;
+
+  // If this is a restat rule, we may have cleaned the output in a
+  // previous run and stored the command start time in the build log.
+  // We don't want to consider a restat rule's outputs as dirty unless
+  // an input changed since the last run, so we'll skip checking the
+  // output file's actual mtime and simply check the recorded mtime from
+  // the log against the most recent input's mtime (see below)
+  bool used_restat = false;
+  if (edge->GetBindingBool("restat") && build_log() &&
+      (entry = build_log()->LookupByOutput(output->path()))) {
+    used_restat = true;
+  }
+
   // Dirty if the output is older than the input.
-  if (most_recent_input && output->mtime() < most_recent_input->mtime()) {
-    TimeStamp output_mtime = output->mtime();
-
-    // If this is a restat rule, we may have cleaned the output with a restat
-    // rule in a previous run and stored the most recent input mtime in the
-    // build log.  Use that mtime instead, so that the file will only be
-    // considered dirty if an input was modified since the previous run.
-    bool used_restat = false;
-    if (edge->GetBindingBool("restat") && build_log() &&
-        (entry = build_log()->LookupByOutput(output->path()))) {
-      output_mtime = entry->mtime;
-      used_restat = true;
-    }
-
-    if (output_mtime < most_recent_input->mtime()) {
-      EXPLAIN("%soutput %s older than most recent input %s "
-              "(%" PRId64 " vs %" PRId64 ")",
-              used_restat ? "restat of " : "", output->path().c_str(),
-              most_recent_input->path().c_str(),
-              output_mtime, most_recent_input->mtime());
-      return true;
-    }
+  if (!used_restat && most_recent_input && output->mtime() < most_recent_input->mtime()) {
+    EXPLAIN("output %s older than most recent input %s "
+            "(%" PRId64 " vs %" PRId64 ")",
+            output->path().c_str(),
+            most_recent_input->path().c_str(),
+            output->mtime(), most_recent_input->mtime());
+    return true;
   }
 
   if (build_log()) {
@@ -346,7 +342,9 @@
         // May also be dirty due to the mtime in the log being older than the
         // mtime of the most recent input.  This can occur even when the mtime
         // on disk is newer if a previous run wrote to the output file but
-        // exited with an error or was interrupted.
+        // exited with an error or was interrupted. If this was a restat rule,
+        // then we only check the recorded mtime against the most recent input
+        // mtime and ignore the actual output's mtime above.
         EXPLAIN("recorded mtime of %s older than most recent input %s (%" PRId64 " vs %" PRId64 ")",
                 output->path().c_str(), most_recent_input->path().c_str(),
                 entry->mtime, most_recent_input->mtime());
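
The hunks above invert the old restat handling: when a restat rule has a build-log entry, the on-disk output mtime comparison is skipped entirely and only the recorded mtime is checked against the newest input. Condensed into a standalone sketch (hypothetical helper, ignoring the command-hash checks that follow in the real function):

    // Sketch of the dirty decision after the change, with mtimes as
    // plain integers and -1 meaning "no log entry".
    bool OutputIsDirty(bool output_exists, bool used_restat,
                       long long output_mtime, long long logged_mtime,
                       long long newest_input_mtime) {
      if (!output_exists)
        return true;                 // missing outputs are always dirty
      // On-disk mtime check, skipped when a restat log entry exists.
      if (!used_restat && output_mtime < newest_input_mtime)
        return true;
      // Recorded-mtime check against the build log entry
      // (the "recorded mtime" EXPLAIN above).
      if (logged_mtime != -1 && logged_mtime < newest_input_mtime)
        return true;
      return false;
    }
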
@@ -393,7 +391,7 @@
   std::string MakePathList(const Node* const* span, size_t size, char sep) const;
 
  private:
-  vector<string> lookups_;
+  std::vector<std::string> lookups_;
   const Edge* const edge_;
   EscapeKind escape_in_out_;
   bool recursive_;
@@ -403,21 +401,50 @@
   if (var == "in" || var == "in_newline") {
     int explicit_deps_count = edge_->inputs_.size() - edge_->implicit_deps_ -
       edge_->order_only_deps_;
-#if __cplusplus >= 201103L
     return MakePathList(edge_->inputs_.data(), explicit_deps_count,
-#else
-    return MakePathList(&edge_->inputs_[0], explicit_deps_count,
-#endif
                         var == "in" ? ' ' : '\n');
   } else if (var == "out") {
     int explicit_outs_count = edge_->outputs_.size() - edge_->implicit_outs_;
     return MakePathList(&edge_->outputs_[0], explicit_outs_count, ' ');
   }
 
+  // Technical note about the lookups_ vector.
+  //
+  // This is used to detect cycles during recursive variable expansion
+  // which can be seen as a graph traversal problem. Consider the following
+  // example:
+  //
+  //    rule something
+  //      command = $foo $foo $var1
+  //      var1 = $var2
+  //      var2 = $var3
+  //      var3 = $var1
+  //      foo = FOO
+  //
+  // Each variable definition can be seen as a node in a graph that looks
+  // like the following:
+  //
+  //   command --> foo
+  //      |
+  //      v
+  //    var1 <-----.
+  //      |        |
+  //      v        |
+  //    var2 ---> var3
+  //
+  // The lookups_ vector is used as a stack of visited nodes/variables
+  // during recursive expansion. Entering a node adds an item to the
+  // stack, leaving the node removes it.
+  //
+  // The recursive_ flag is used as a small performance optimization
+  // to never record the starting node in the stack when beginning a new
+  // expansion, since in most cases, expansions are not recursive
+  // at all.
+  //
   if (recursive_) {
-    vector<string>::const_iterator it;
-    if ((it = find(lookups_.begin(), lookups_.end(), var)) != lookups_.end()) {
-      string cycle;
+    auto it = std::find(lookups_.begin(), lookups_.end(), var);
+    if (it != lookups_.end()) {
+      std::string cycle;
       for (; it != lookups_.end(); ++it)
         cycle.append(*it + " -> ");
       cycle.append(var);
@@ -427,13 +454,17 @@
 
   // See notes on BindingEnv::LookupWithFallback.
   const EvalString* eval = edge_->rule_->GetBinding(var);
-  if (recursive_ && eval)
+  bool record_varname = recursive_ && eval;
+  if (record_varname)
     lookups_.push_back(var);
 
   // In practice, variables defined on rules never use another rule variable.
   // For performance, only start checking for cycles after the first lookup.
   recursive_ = true;
-  return edge_->env_->LookupWithFallback(var, eval, this);
+  std::string result = edge_->env_->LookupWithFallback(var, eval, this);
+  if (record_varname)
+    lookups_.pop_back();
+  return result;
 }
 
 std::string EdgeEnv::MakePathList(const Node* const* const span,
@@ -696,7 +727,6 @@
     Node* node = state_->GetNode(*i, slash_bits);
     *implicit_dep = node;
     node->AddOutEdge(edge);
-    CreatePhonyInEdge(node);
   }
 
   return true;
@@ -724,7 +754,6 @@
     Node* node = deps->nodes[i];
     *implicit_dep = node;
     node->AddOutEdge(edge);
-    CreatePhonyInEdge(node);
   }
   return true;
 }
@@ -736,21 +765,3 @@
   edge->implicit_deps_ += count;
   return edge->inputs_.end() - edge->order_only_deps_ - count;
 }
-
-void ImplicitDepLoader::CreatePhonyInEdge(Node* node) {
-  if (node->in_edge())
-    return;
-
-  Edge* phony_edge = state_->AddEdge(&State::kPhonyRule);
-  phony_edge->generated_by_dep_loader_ = true;
-  node->set_in_edge(phony_edge);
-  phony_edge->outputs_.push_back(node);
-
-  // RecomputeDirty might not be called for phony_edge if a previous call
-  // to RecomputeDirty had caused the file to be stat'ed.  Because previous
-  // invocations of RecomputeDirty would have seen this node without an
-  // input edge (and therefore ready), we have to set outputs_ready_ to true
-  // to avoid a potential stuck build.  If we do call RecomputeDirty for
-  // this node, it will simply set outputs_ready_ to the correct value.
-  phony_edge->outputs_ready_ = true;
-}
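
The technical note added to EdgeEnv::LookupVariable treats lookups_ as a stack of variables currently being expanded, pushed on entry and (with this patch) popped on exit. The same traversal in a self-contained sketch, with a toy variable table standing in for the rule and edge bindings:

    #include <algorithm>
    #include <map>
    #include <string>
    #include <vector>

    // Toy model: each variable expands to at most one other variable.
    struct Expander {
      std::map<std::string, std::string> defs;
      std::vector<std::string> stack;  // variables currently being expanded

      bool Expand(const std::string& var, std::string* err) {
        if (std::find(stack.begin(), stack.end(), var) != stack.end()) {
          *err = "cycle in rule variables involving '" + var + "'";
          return false;
        }
        auto it = defs.find(var);
        if (it == defs.end() || it->second.empty())
          return true;          // leaf variable: nothing left to expand
        stack.push_back(var);   // enter the node
        bool ok = Expand(it->second, err);
        stack.pop_back();       // leave the node (the pop this patch adds)
        return ok;
      }
    };

    // With defs = {var1: var2, var2: var3, var3: var1}, Expand("var1", &err)
    // reports a cycle; with defs = {foo: ""}, it succeeds.
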
diff --git a/src/graph.h b/src/graph.h
index 9de67d2..820a265 100644
--- a/src/graph.h
+++ b/src/graph.h
@@ -19,6 +19,7 @@
 #include <set>
 #include <string>
 #include <vector>
+#include <queue>
 
 #include "dyndep.h"
 #include "eval_env.h"
@@ -38,14 +39,7 @@
 /// it's dirty, mtime, etc.
 struct Node {
   Node(const std::string& path, uint64_t slash_bits)
-      : path_(path),
-        slash_bits_(slash_bits),
-        mtime_(-1),
-        exists_(ExistenceStatusUnknown),
-        dirty_(false),
-        dyndep_pending_(false),
-        in_edge_(NULL),
-        id_(-1) {}
+      : path_(path), slash_bits_(slash_bits) {}
 
   /// Return false on error.
   bool Stat(DiskInterface* disk_interface, std::string* err);
@@ -104,6 +98,14 @@
   Edge* in_edge() const { return in_edge_; }
   void set_in_edge(Edge* edge) { in_edge_ = edge; }
 
+  /// Indicates whether this node was generated from a depfile or dyndep file,
+  /// instead of being a regular input or output from the Ninja manifest.
+  bool generated_by_dep_loader() const { return generated_by_dep_loader_; }
+
+  void set_generated_by_dep_loader(bool value) {
+    generated_by_dep_loader_ = value;
+  }
+
   int id() const { return id_; }
   void set_id(int id) { id_ = id; }
 
@@ -119,13 +121,13 @@
 
   /// Set bits starting from lowest for backslashes that were normalized to
   /// forward slashes by CanonicalizePath. See |PathDecanonicalized|.
-  uint64_t slash_bits_;
+  uint64_t slash_bits_ = 0;
 
   /// Possible values of mtime_:
   ///   -1: file hasn't been examined
   ///   0:  we looked, and file doesn't exist
   ///   >0: actual file's mtime, or the latest mtime of its dependencies if it doesn't exist
-  TimeStamp mtime_;
+  TimeStamp mtime_ = -1;
 
   enum ExistenceStatus {
     /// The file hasn't been examined.
@@ -135,20 +137,27 @@
     /// The path is an actual file. mtime_ will be the file's mtime.
     ExistenceStatusExists
   };
-  ExistenceStatus exists_;
+  ExistenceStatus exists_ = ExistenceStatusUnknown;
 
   /// Dirty is true when the underlying file is out-of-date.
   /// But note that Edge::outputs_ready_ is also used in judging which
   /// edges to build.
-  bool dirty_;
+  bool dirty_ = false;
 
   /// Store whether dyndep information is expected from this node but
   /// has not yet been loaded.
-  bool dyndep_pending_;
+  bool dyndep_pending_ = false;
+
+  /// Set to true when this node comes from a depfile, a dyndep file or the
+  /// deps log. If it does not have a producing edge, the build should not
+  /// abort if it is missing (as for regular source inputs). By default
+  /// all nodes have this flag set to true, since the deps and build logs
+  /// can be loaded before the manifest.
+  bool generated_by_dep_loader_ = true;
 
   /// The Edge that produces this Node, or NULL when there is no
   /// known edge to produce it.
-  Edge* in_edge_;
+  Edge* in_edge_ = nullptr;
 
   /// All Edges that use this Node as an input.
   std::vector<Edge*> out_edges_;
@@ -157,7 +166,7 @@
   std::vector<Edge*> validation_out_edges_;
 
   /// A dense integer id for the node, assigned and used by DepsLog.
-  int id_;
+  int id_ = -1;
 };
 
 /// An edge in the dependency graph; links between Nodes using Rules.
@@ -168,11 +177,7 @@
     VisitDone
   };
 
-  Edge()
-      : rule_(NULL), pool_(NULL), dyndep_(NULL), env_(NULL), mark_(VisitNone),
-        id_(0), outputs_ready_(false), deps_loaded_(false),
-        deps_missing_(false), generated_by_dep_loader_(false),
-        implicit_deps_(0), order_only_deps_(0), implicit_outs_(0) {}
+  Edge() = default;
 
   /// Return true if all inputs' in-edges are ready.
   bool AllInputsReady() const;
@@ -198,19 +203,30 @@
   // Append all edge explicit inputs to |*out|. Possibly with shell escaping.
   void CollectInputs(bool shell_escape, std::vector<std::string>* out) const;
 
-  const Rule* rule_;
-  Pool* pool_;
+  // critical_path_weight is the priority during build scheduling. The
+  // "critical path" between this edge's inputs and any target node is
+  // the path which maximises the sum of weights along that path.
+  // NOTE: Defaults to -1 as a marker smaller than any valid weight
+  int64_t critical_path_weight() const { return critical_path_weight_; }
+  void set_critical_path_weight(int64_t critical_path_weight) {
+    critical_path_weight_ = critical_path_weight;
+  }
+
+  const Rule* rule_ = nullptr;
+  Pool* pool_ = nullptr;
   std::vector<Node*> inputs_;
   std::vector<Node*> outputs_;
   std::vector<Node*> validations_;
-  Node* dyndep_;
-  BindingEnv* env_;
-  VisitMark mark_;
-  size_t id_;
-  bool outputs_ready_;
-  bool deps_loaded_;
-  bool deps_missing_;
-  bool generated_by_dep_loader_;
+  Node* dyndep_ = nullptr;
+  BindingEnv* env_ = nullptr;
+  VisitMark mark_ = VisitNone;
+  size_t id_ = 0;
+  int64_t critical_path_weight_ = -1;
+  bool outputs_ready_ = false;
+  bool deps_loaded_ = false;
+  bool deps_missing_ = false;
+  bool generated_by_dep_loader_ = false;
+  TimeStamp command_start_time_ = 0;
 
   const Rule& rule() const { return *rule_; }
   Pool* pool() const { return pool_; }
@@ -225,8 +241,8 @@
   //                     don't cause the target to rebuild.
   // These are stored in inputs_ in that order, and we keep counts of
   // #2 and #3 when we need to access the various subsets.
-  int implicit_deps_;
-  int order_only_deps_;
+  int implicit_deps_ = 0;
+  int order_only_deps_ = 0;
   bool is_implicit(size_t index) {
     return index >= inputs_.size() - order_only_deps_ - implicit_deps_ &&
         !is_order_only(index);
@@ -240,7 +256,7 @@
   // 2) implicit outs, which the target generates but are not part of $out.
   // These are stored in outputs_ in that order, and we keep a count of
   // #2 to use when we need to access the various subsets.
-  int implicit_outs_;
+  int implicit_outs_ = 0;
   bool is_implicit_out(size_t index) const {
     return index >= outputs_.size() - implicit_outs_;
   }
@@ -248,6 +264,10 @@
   bool is_phony() const;
   bool use_console() const;
   bool maybe_phonycycle_diagnostic() const;
+
+  // Historical info: how long did this edge take last time,
+  // as per .ninja_log, if known? Defaults to -1 if unknown.
+  int64_t prev_elapsed_time_millis = -1;
 };
 
 struct EdgeCmp {
@@ -295,11 +315,6 @@
   /// an iterator pointing at the first new space.
   std::vector<Node*>::iterator PreallocateSpace(Edge* edge, int count);
 
-  /// If we don't have a edge that generates this input already,
-  /// create one; this makes us not abort if the input is missing,
-  /// but instead will rebuild in that circumstance.
-  void CreatePhonyInEdge(Node* node);
-
   State* state_;
   DiskInterface* disk_interface_;
   DepsLog* deps_log_;
@@ -366,4 +381,40 @@
   DyndepLoader dyndep_loader_;
 };
 
+// Implements a less comparison for edges by priority, where highest
+// priority is defined lexicographically first by largest critical
+// time, then lowest ID.
+//
+// Including ID means that wherever the critical path weights are the
+// same, the edges are executed in ascending ID order which was
+// historically how all tasks were scheduled.
+struct EdgePriorityLess {
+  bool operator()(const Edge* e1, const Edge* e2) const {
+    const int64_t cw1 = e1->critical_path_weight();
+    const int64_t cw2 = e2->critical_path_weight();
+    if (cw1 != cw2) {
+      return cw1 < cw2;
+    }
+    return e1->id_ > e2->id_;
+  }
+};
+
+// Reverse of EdgePriorityLess, e.g. to sort by highest priority first
+struct EdgePriorityGreater {
+  bool operator()(const Edge* e1, const Edge* e2) const {
+    return EdgePriorityLess()(e2, e1);
+  }
+};
+
+// A priority queue holding non-owning Edge pointers. top() will
+// return the edge with the largest critical path weight, and lowest
+// ID if more than one edge has the same critical path weight.
+class EdgePriorityQueue:
+  public std::priority_queue<Edge*, std::vector<Edge*>, EdgePriorityLess>{
+public:
+  void clear() {
+    c.clear();
+  }
+};
+
 #endif  // NINJA_GRAPH_H_
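
EdgePriorityQueue obtains clear() by deriving from std::priority_queue, whose underlying container is the protected member c. The same trick shown on plain integers, independent of Edge:

    #include <queue>
    #include <vector>

    // std::priority_queue keeps its container in the protected member `c`,
    // so a derived type can add bulk operations such as clear().
    class ClearableMaxHeap
        : public std::priority_queue<int, std::vector<int>, std::less<int> > {
     public:
      void clear() { c.clear(); }
    };

    // Usage sketch: top() is the largest value, just as EdgePriorityQueue
    // surfaces the edge with the largest critical path weight.
    //   ClearableMaxHeap q;
    //   q.push(3); q.push(10); q.push(7);
    //   // q.top() == 10
    //   q.clear();
    //   // q.empty() == true
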
diff --git a/src/graph_test.cc b/src/graph_test.cc
index 9dba8af..fb0513c 100644
--- a/src/graph_test.cc
+++ b/src/graph_test.cc
@@ -977,3 +977,56 @@
   EXPECT_EQ(out1->mtime(), out1Mtime1);
   EXPECT_TRUE(out1->dirty());
 }
+
+// Test that EdgePriorityQueue correctly prioritizes by critical path weight
+TEST_F(GraphTest, EdgeQueuePriority) {
+
+  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
+"rule r\n"
+"  command = unused\n"
+"build out1: r in1\n"
+"build out2: r in2\n"
+"build out3: r in3\n"
+));
+
+  const int n_edges = 3;
+  Edge *(edges)[n_edges] = {
+    GetNode("out1")->in_edge(),
+    GetNode("out2")->in_edge(),
+    GetNode("out3")->in_edge(),
+  };
+
+  // Output is largest critical time to smallest
+  for (int i = 0; i < n_edges; ++i) {
+    edges[i]->set_critical_path_weight(i * 10);
+  }
+
+  EdgePriorityQueue queue;
+  for (int i = 0; i < n_edges; ++i) {
+    queue.push(edges[i]);
+  }
+
+  EXPECT_EQ(queue.size(), n_edges);
+  for (int i = 0; i < n_edges; ++i) {
+    EXPECT_EQ(queue.top(), edges[n_edges - 1 - i]);
+    queue.pop();
+  }
+  EXPECT_TRUE(queue.empty());
+
+  // When there is ambiguity, the lowest edge id comes first
+  for (int i = 0; i < n_edges; ++i) {
+    edges[i]->set_critical_path_weight(0);
+  }
+
+  queue.push(edges[1]);
+  queue.push(edges[2]);
+  queue.push(edges[0]);
+
+  for (int i = 0; i < n_edges; ++i) {
+    EXPECT_EQ(queue.top(), edges[i]);
+    queue.pop();
+  }
+  EXPECT_TRUE(queue.empty());
+}
+
+
diff --git a/src/hash_map.h b/src/hash_map.h
index 55d2c9d..4353609 100644
--- a/src/hash_map.h
+++ b/src/hash_map.h
@@ -53,7 +53,6 @@
   return h;
 }
 
-#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900)
 #include <unordered_map>
 
 namespace std {
@@ -68,56 +67,13 @@
 };
 }
 
-#elif defined(_MSC_VER)
-#include <hash_map>
-
-using stdext::hash_map;
-using stdext::hash_compare;
-
-struct StringPieceCmp : public hash_compare<StringPiece> {
-  size_t operator()(const StringPiece& key) const {
-    return MurmurHash2(key.str_, key.len_);
-  }
-  bool operator()(const StringPiece& a, const StringPiece& b) const {
-    int cmp = memcmp(a.str_, b.str_, min(a.len_, b.len_));
-    if (cmp < 0) {
-      return true;
-    } else if (cmp > 0) {
-      return false;
-    } else {
-      return a.len_ < b.len_;
-    }
-  }
-};
-
-#else
-#include <ext/hash_map>
-
-using __gnu_cxx::hash_map;
-
-namespace __gnu_cxx {
-template<>
-struct hash<StringPiece> {
-  size_t operator()(StringPiece key) const {
-    return MurmurHash2(key.str_, key.len_);
-  }
-};
-}
-#endif
-
 /// A template for hash_maps keyed by a StringPiece whose string is
 /// owned externally (typically by the values).  Use like:
 /// ExternalStringHash<Foo*>::Type foos; to make foos into a hash
 /// mapping StringPiece => Foo*.
 template<typename V>
 struct ExternalStringHashMap {
-#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900)
   typedef std::unordered_map<StringPiece, V> Type;
-#elif defined(_MSC_VER)
-  typedef hash_map<StringPiece, V, StringPieceCmp> Type;
-#else
-  typedef hash_map<StringPiece, V> Type;
-#endif
 };
 
 #endif // NINJA_MAP_H_
diff --git a/src/includes_normalize.h b/src/includes_normalize.h
index 7d50556..8d29a64 100644
--- a/src/includes_normalize.h
+++ b/src/includes_normalize.h
@@ -12,6 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#ifndef INCLUDES_NORMALIZE_H_
+#define INCLUDES_NORMALIZE_H_
+
 #include <string>
 #include <vector>
 
@@ -38,3 +41,5 @@
   std::string relative_to_;
   std::vector<StringPiece> split_relative_to_;
 };
+
+#endif  // INCLUDES_NORMALIZE_H_
diff --git a/src/includes_normalize_test.cc b/src/includes_normalize_test.cc
index 9214f53..12965f9 100644
--- a/src/includes_normalize_test.cc
+++ b/src/includes_normalize_test.cc
@@ -117,10 +117,10 @@
   // Construct max size path having cwd prefix.
   // kExactlyMaxPath = "$cwd\\a\\aaaa...aaaa\0";
   char kExactlyMaxPath[_MAX_PATH + 1];
-  ASSERT_NE(_getcwd(kExactlyMaxPath, sizeof kExactlyMaxPath), NULL);
+  ASSERT_STRNE(_getcwd(kExactlyMaxPath, sizeof kExactlyMaxPath), NULL);
 
   int cwd_len = strlen(kExactlyMaxPath);
-  ASSERT_LE(cwd_len + 3 + 1, _MAX_PATH)
+  ASSERT_LE(cwd_len + 3 + 1, _MAX_PATH);
   kExactlyMaxPath[cwd_len] = '\\';
   kExactlyMaxPath[cwd_len + 1] = 'a';
   kExactlyMaxPath[cwd_len + 2] = '\\';
diff --git a/src/line_printer.cc b/src/line_printer.cc
index a3d0528..12e82b3 100644
--- a/src/line_printer.cc
+++ b/src/line_printer.cc
@@ -46,10 +46,6 @@
   }
 #endif
   supports_color_ = smart_terminal_;
-  if (!supports_color_) {
-    const char* clicolor_force = getenv("CLICOLOR_FORCE");
-    supports_color_ = clicolor_force && string(clicolor_force) != "0";
-  }
 #ifdef _WIN32
   // Try enabling ANSI escape sequence support on Windows 10 terminals.
   if (supports_color_) {
@@ -61,6 +57,10 @@
     }
   }
 #endif
+  if (!supports_color_) {
+    const char* clicolor_force = getenv("CLICOLOR_FORCE");
+    supports_color_ = clicolor_force && std::string(clicolor_force) != "0";
+  }
 }
 
 void LinePrinter::Print(string to_print, LineType type) {
@@ -118,6 +118,7 @@
     have_blank_line_ = false;
   } else {
     printf("%s\n", to_print.c_str());
+    fflush(stdout);
   }
 }
 
diff --git a/src/manifest_parser.cc b/src/manifest_parser.cc
index 8db6eb3..c4b2980 100644
--- a/src/manifest_parser.cc
+++ b/src/manifest_parser.cc
@@ -14,8 +14,10 @@
 
 #include "manifest_parser.h"
 
+#include <assert.h>
 #include <stdio.h>
 #include <stdlib.h>
+
 #include <vector>
 
 #include "graph.h"
@@ -334,20 +336,9 @@
       return lexer_.Error("empty path", err);
     uint64_t slash_bits;
     CanonicalizePath(&path, &slash_bits);
-    if (!state_->AddOut(edge, path, slash_bits)) {
-      if (options_.dupe_edge_action_ == kDupeEdgeActionError) {
-        lexer_.Error("multiple rules generate " + path, err);
-        return false;
-      } else {
-        if (!quiet_) {
-          Warning(
-              "multiple rules generate %s. builds involving this target will "
-              "not be correct; continuing anyway",
-              path.c_str());
-        }
-        if (e - i <= static_cast<size_t>(implicit_outs))
-          --implicit_outs;
-      }
+    if (!state_->AddOut(edge, path, slash_bits, err)) {
+      lexer_.Error(std::string(*err), err);
+      return false;
     }
   }
 
@@ -416,6 +407,7 @@
     if (dgi == edge->inputs_.end()) {
       return lexer_.Error("dyndep '" + dyndep + "' is not an input", err);
     }
+    assert(!edge->dyndep_->generated_by_dep_loader());
   }
 
   return true;
diff --git a/src/manifest_parser.h b/src/manifest_parser.h
index 954cf46..db6812d 100644
--- a/src/manifest_parser.h
+++ b/src/manifest_parser.h
@@ -31,11 +31,7 @@
 };
 
 struct ManifestParserOptions {
-  ManifestParserOptions()
-      : dupe_edge_action_(kDupeEdgeActionWarn),
-        phony_cycle_action_(kPhonyCycleActionWarn) {}
-  DupeEdgeAction dupe_edge_action_;
-  PhonyCycleAction phony_cycle_action_;
+  PhonyCycleAction phony_cycle_action_ = kPhonyCycleActionWarn;
 };
 
 /// Parses .ninja files.
diff --git a/src/manifest_parser_test.cc b/src/manifest_parser_test.cc
index 66b72e2..c5a1fe8 100644
--- a/src/manifest_parser_test.cc
+++ b/src/manifest_parser_test.cc
@@ -330,29 +330,6 @@
 }
 #endif
 
-TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputs) {
-  ASSERT_NO_FATAL_FAILURE(AssertParse(
-"rule cat\n"
-"  command = cat $in > $out\n"
-"build out1 out2: cat in1\n"
-"build out1: cat in2\n"
-"build final: cat out1\n"
-));
-  // AssertParse() checks that the generated build graph is self-consistent.
-  // That's all the checking that this test needs.
-}
-
-TEST_F(ParserTest, NoDeadPointerFromDuplicateEdge) {
-  ASSERT_NO_FATAL_FAILURE(AssertParse(
-"rule cat\n"
-"  command = cat $in > $out\n"
-"build out: cat in\n"
-"build out: cat in\n"
-));
-  // AssertParse() checks that the generated build graph is self-consistent.
-  // That's all the checking that this test needs.
-}
-
 TEST_F(ParserTest, DuplicateEdgeWithMultipleOutputsError) {
   const char kInput[] =
 "rule cat\n"
@@ -360,9 +337,7 @@
 "build out1 out2: cat in1\n"
 "build out1: cat in2\n"
 "build final: cat out1\n";
-  ManifestParserOptions parser_opts;
-  parser_opts.dupe_edge_action_ = kDupeEdgeActionError;
-  ManifestParser parser(&state, &fs_, parser_opts);
+  ManifestParser parser(&state, &fs_);
   string err;
   EXPECT_FALSE(parser.ParseTest(kInput, &err));
   EXPECT_EQ("input:5: multiple rules generate out1\n", err);
@@ -377,9 +352,7 @@
     "build final: cat out1\n");
   const char kInput[] =
     "subninja sub.ninja\n";
-  ManifestParserOptions parser_opts;
-  parser_opts.dupe_edge_action_ = kDupeEdgeActionError;
-  ManifestParser parser(&state, &fs_, parser_opts);
+  ManifestParser parser(&state, &fs_);
   string err;
   EXPECT_FALSE(parser.ParseTest(kInput, &err));
   EXPECT_EQ("sub.ninja:5: multiple rules generate out1\n", err);
@@ -997,28 +970,26 @@
   EXPECT_FALSE(edge->is_implicit_out(0));
 }
 
-TEST_F(ParserTest, ImplicitOutputDupe) {
-  ASSERT_NO_FATAL_FAILURE(AssertParse(
+TEST_F(ParserTest, ImplicitOutputDupeError) {
+  const char kInput[] =
 "rule cat\n"
 "  command = cat $in > $out\n"
-"build foo baz | foo baq foo: cat bar\n"));
-
-  Edge* edge = state.LookupNode("foo")->in_edge();
-  ASSERT_EQ(edge->outputs_.size(), 3);
-  EXPECT_FALSE(edge->is_implicit_out(0));
-  EXPECT_FALSE(edge->is_implicit_out(1));
-  EXPECT_TRUE(edge->is_implicit_out(2));
+"build foo baz | foo baq foo: cat bar\n";
+  ManifestParser parser(&state, &fs_);
+  string err;
+  EXPECT_FALSE(parser.ParseTest(kInput, &err));
+  EXPECT_EQ("input:4: foo is defined as an output multiple times\n", err);
 }
 
-TEST_F(ParserTest, ImplicitOutputDupes) {
-  ASSERT_NO_FATAL_FAILURE(AssertParse(
+TEST_F(ParserTest, ImplicitOutputDupesError) {
+  const char kInput[] =
 "rule cat\n"
 "  command = cat $in > $out\n"
-"build foo foo foo | foo foo foo foo: cat bar\n"));
-
-  Edge* edge = state.LookupNode("foo")->in_edge();
-  ASSERT_EQ(edge->outputs_.size(), 1);
-  EXPECT_FALSE(edge->is_implicit_out(0));
+"build foo foo foo | foo foo foo foo: cat bar\n";
+  ManifestParser parser(&state, &fs_);
+  string err;
+  EXPECT_FALSE(parser.ParseTest(kInput, &err));
+  EXPECT_EQ("input:4: foo is defined as an output multiple times\n", err);
 }
 
 TEST_F(ParserTest, NoExplicitOutput) {
diff --git a/src/metrics.cc b/src/metrics.cc
index dbaf221..632ae43 100644
--- a/src/metrics.cc
+++ b/src/metrics.cc
@@ -18,13 +18,8 @@
 #include <stdio.h>
 #include <string.h>
 
-#ifndef _WIN32
-#include <sys/time.h>
-#else
-#include <windows.h>
-#endif
-
 #include <algorithm>
+#include <chrono>
 
 #include "util.h"
 
@@ -34,49 +29,40 @@
 
 namespace {
 
-#ifndef _WIN32
 /// Compute a platform-specific high-res timer value that fits into an int64.
 int64_t HighResTimer() {
-  timeval tv;
-  if (gettimeofday(&tv, NULL) < 0)
-    Fatal("gettimeofday: %s", strerror(errno));
-  return (int64_t)tv.tv_sec * 1000*1000 + tv.tv_usec;
+  auto now = chrono::steady_clock::now();
+  return chrono::duration_cast<chrono::steady_clock::duration>(
+             now.time_since_epoch())
+      .count();
 }
 
-/// Convert a delta of HighResTimer() values to microseconds.
-int64_t TimerToMicros(int64_t dt) {
-  // No conversion necessary.
-  return dt;
-}
-#else
-int64_t LargeIntegerToInt64(const LARGE_INTEGER& i) {
-  return ((int64_t)i.HighPart) << 32 | i.LowPart;
-}
-
-int64_t HighResTimer() {
-  LARGE_INTEGER counter;
-  if (!QueryPerformanceCounter(&counter))
-    Fatal("QueryPerformanceCounter: %s", GetLastErrorString().c_str());
-  return LargeIntegerToInt64(counter);
+constexpr int64_t GetFrequency() {
+  // If numerator isn't 1 then we lose precision and that will need to be
+  // assessed.
+  static_assert(std::chrono::steady_clock::period::num == 1,
+                "Numerator must be 1");
+  return std::chrono::steady_clock::period::den /
+         std::chrono::steady_clock::period::num;
 }
 
 int64_t TimerToMicros(int64_t dt) {
-  static int64_t ticks_per_sec = 0;
-  if (!ticks_per_sec) {
-    LARGE_INTEGER freq;
-    if (!QueryPerformanceFrequency(&freq))
-      Fatal("QueryPerformanceFrequency: %s", GetLastErrorString().c_str());
-    ticks_per_sec = LargeIntegerToInt64(freq);
-  }
-
   // dt is in ticks.  We want microseconds.
-  return (dt * 1000000) / ticks_per_sec;
+  return chrono::duration_cast<chrono::microseconds>(
+             std::chrono::steady_clock::duration{ dt })
+      .count();
 }
-#endif
+
+int64_t TimerToMicros(double dt) {
+  // dt is in ticks.  We want microseconds.
+  using DoubleSteadyClock =
+      std::chrono::duration<double, std::chrono::steady_clock::period>;
+  return chrono::duration_cast<chrono::microseconds>(DoubleSteadyClock{ dt })
+      .count();
+}
 
 }  // anonymous namespace
 
-
 ScopedMetric::ScopedMetric(Metric* metric) {
   metric_ = metric;
   if (!metric_)
@@ -87,7 +73,9 @@
   if (!metric_)
     return;
   metric_->count++;
-  int64_t dt = TimerToMicros(HighResTimer() - start_);
+  // Leave in the timer's natural frequency to avoid paying the conversion cost
+  // on every measurement.
+  int64_t dt = HighResTimer() - start_;
   metric_->sum += dt;
 }
 
@@ -112,18 +100,23 @@
   for (vector<Metric*>::iterator i = metrics_.begin();
        i != metrics_.end(); ++i) {
     Metric* metric = *i;
-    double total = metric->sum / (double)1000;
-    double avg = metric->sum / (double)metric->count;
+    uint64_t micros = TimerToMicros(metric->sum);
+    double total = micros / (double)1000;
+    double avg = micros / (double)metric->count;
     printf("%-*s\t%-6d\t%-8.1f\t%.1f\n", width, metric->name.c_str(),
            metric->count, avg, total);
   }
 }
 
-uint64_t Stopwatch::Now() const {
-  return TimerToMicros(HighResTimer());
+double Stopwatch::Elapsed() const {
+  // Convert to micros after converting to double to minimize error.
+  return 1e-6 * TimerToMicros(static_cast<double>(NowRaw() - started_));
+}
+
+uint64_t Stopwatch::NowRaw() const {
+  return HighResTimer();
 }
 
 int64_t GetTimeMillis() {
   return TimerToMicros(HighResTimer()) / 1000;
 }
-
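
Metrics now accumulate raw steady_clock ticks and convert to microseconds only when reporting, which keeps the per-measurement cost at a single subtraction. The two conversions in a minimal sketch, using only the standard <chrono> calls shown above:

    #include <chrono>
    #include <cstdint>

    // Raw tick count of the steady clock (period is platform-dependent).
    int64_t HighResTimerTicks() {
      return std::chrono::steady_clock::now().time_since_epoch().count();
    }

    // Convert a tick delta to microseconds only when a report is printed.
    int64_t TicksToMicros(int64_t dt) {
      return std::chrono::duration_cast<std::chrono::microseconds>(
                 std::chrono::steady_clock::duration(dt))
          .count();
    }
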
diff --git a/src/metrics.h b/src/metrics.h
index 11239b5..937d905 100644
--- a/src/metrics.h
+++ b/src/metrics.h
@@ -28,11 +28,10 @@
   std::string name;
   /// Number of times we've hit the code path.
   int count;
-  /// Total time (in micros) we've spent on the code path.
+  /// Total time (in platform-dependent units) we've spent on the code path.
   int64_t sum;
 };
 
-
 /// A scoped object for recording a metric across the body of a function.
 /// Used by the METRIC_RECORD macro.
 struct ScopedMetric {
@@ -68,15 +67,15 @@
   Stopwatch() : started_(0) {}
 
   /// Seconds since Restart() call.
-  double Elapsed() const {
-    return 1e-6 * static_cast<double>(Now() - started_);
-  }
+  double Elapsed() const;
 
-  void Restart() { started_ = Now(); }
+  void Restart() { started_ = NowRaw(); }
 
  private:
   uint64_t started_;
-  uint64_t Now() const;
+  // Return the current time using the native frequency of the high resolution
+  // timer.
+  uint64_t NowRaw() const;
 };
 
 /// The primary interface to metrics.  Use METRIC_RECORD("foobar") at the top
@@ -86,6 +85,13 @@
       g_metrics ? g_metrics->NewMetric(name) : NULL;                    \
   ScopedMetric metrics_h_scoped(metrics_h_metric);
 
+/// A variant of METRIC_RECORD that doesn't record anything if |condition|
+/// is false.
+#define METRIC_RECORD_IF(name, condition)            \
+  static Metric* metrics_h_metric =                  \
+      g_metrics ? g_metrics->NewMetric(name) : NULL; \
+  ScopedMetric metrics_h_scoped((condition) ? metrics_h_metric : NULL);
+
 extern Metrics* g_metrics;
 
 #endif // NINJA_METRICS_H_
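
METRIC_RECORD_IF still registers the static Metric once, but hands ScopedMetric a NULL pointer whenever the condition is false, so a nested or conditional call contributes nothing. An illustrative (hypothetical) use, in the same spirit as the Parser::Load change further below:

    // Assuming metrics.h is included: time only the outermost call of a
    // recursive load, so nested calls are not double counted.
    void LoadRecursive(int depth) {
      METRIC_RECORD_IF("load", depth == 0);
      // ... do work, possibly calling LoadRecursive(depth + 1) ...
    }
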
diff --git a/src/missing_deps.h b/src/missing_deps.h
index ae57074..7a615da 100644
--- a/src/missing_deps.h
+++ b/src/missing_deps.h
@@ -19,9 +19,7 @@
 #include <set>
 #include <string>
 
-#if __cplusplus >= 201103L
 #include <unordered_map>
-#endif
 
 struct DepsLog;
 struct DiskInterface;
@@ -68,13 +66,8 @@
   int missing_dep_path_count_;
 
  private:
-#if __cplusplus >= 201103L
   using InnerAdjacencyMap = std::unordered_map<Edge*, bool>;
   using AdjacencyMap = std::unordered_map<Edge*, InnerAdjacencyMap>;
-#else
-  typedef std::map<Edge*, bool> InnerAdjacencyMap;
-  typedef std::map<Edge*, InnerAdjacencyMap> AdjacencyMap;
-#endif
   AdjacencyMap adjacency_map_;
 };
 
diff --git a/src/missing_deps_test.cc b/src/missing_deps_test.cc
index db66885..dae377b 100644
--- a/src/missing_deps_test.cc
+++ b/src/missing_deps_test.cc
@@ -33,7 +33,12 @@
         scanner_(&delegate_, &deps_log_, &state_, &filesystem_) {
     std::string err;
     deps_log_.OpenForWrite(kTestDepsLogFilename, &err);
-    ASSERT_EQ("", err);
+    EXPECT_EQ("", err);
+  }
+
+  ~MissingDependencyScannerTest() {
+    // Remove test file.
+    deps_log_.Close();
   }
 
   MissingDependencyScanner& scanner() { return scanner_; }
@@ -59,9 +64,9 @@
     compile_rule_.AddBinding("deps", deps_type);
     generator_rule_.AddBinding("deps", deps_type);
     Edge* header_edge = state_.AddEdge(&generator_rule_);
-    state_.AddOut(header_edge, "generated_header", 0);
+    state_.AddOut(header_edge, "generated_header", 0, nullptr);
     Edge* compile_edge = state_.AddEdge(&compile_rule_);
-    state_.AddOut(compile_edge, "compiled_object", 0);
+    state_.AddOut(compile_edge, "compiled_object", 0, nullptr);
   }
 
   void CreateGraphDependencyBetween(const char* from, const char* to) {
@@ -79,6 +84,7 @@
     ASSERT_EQ(1u, scanner().generator_rules_.count(rule));
   }
 
+  ScopedFilePath scoped_file_path_ = kTestDepsLogFilename;
   MissingDependencyTestDelegate delegate_;
   Rule generator_rule_;
   Rule compile_rule_;
@@ -124,7 +130,7 @@
   CreateInitialState();
   // Adding an indirect dependency also fixes the issue
   Edge* intermediate_edge = state_.AddEdge(&generator_rule_);
-  state_.AddOut(intermediate_edge, "intermediate", 0);
+  state_.AddOut(intermediate_edge, "intermediate", 0, nullptr);
   CreateGraphDependencyBetween("compiled_object", "intermediate");
   CreateGraphDependencyBetween("intermediate", "generated_header");
   RecordDepsLogDep("compiled_object", "generated_header");
@@ -159,4 +165,3 @@
   std::vector<Node*> nodes = state_.RootNodes(&err);
   ASSERT_NE("", err);
 }
-
diff --git a/src/msvc_helper.h b/src/msvc_helper.h
index 568b9f9..699b0a1 100644
--- a/src/msvc_helper.h
+++ b/src/msvc_helper.h
@@ -12,6 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#ifndef MSVC_HELPER_H_
+#define MSVC_HELPER_H_
+
 #include <string>
 
 std::string EscapeForDepfile(const std::string& path);
@@ -30,3 +33,5 @@
 
   void* env_block_;
 };
+
+#endif  // MSVC_HELPER_H_
diff --git a/src/ninja.cc b/src/ninja.cc
index 2b71eb1..ce1beda 100644
--- a/src/ninja.cc
+++ b/src/ninja.cc
@@ -77,9 +77,6 @@
   /// Tool to run rather than building.
   const Tool* tool;
 
-  /// Whether duplicate rules for one target should warn or print an error.
-  bool dupe_edges_should_err;
-
   /// Whether phony cycles should warn or print an error.
   bool phony_cycle_should_err;
 };
@@ -156,6 +153,10 @@
   /// @return true if the manifest was rebuilt.
   bool RebuildManifest(const char* input_file, string* err, Status* status);
 
+  /// For each edge, lookup in build log how long it took last time,
+  /// and record that in the edge itself. It will be used for ETA prediction.
+  void ParsePreviousElapsedTimes();
+
   /// Build the targets listed on the command line.
   /// @return an exit code.
   int RunBuild(int argc, char** argv, Status* status);
@@ -289,6 +290,19 @@
   return true;
 }
 
+void NinjaMain::ParsePreviousElapsedTimes() {
+  for (Edge* edge : state_.edges_) {
+    for (Node* out : edge->outputs_) {
+      BuildLog::LogEntry* log_entry = build_log_.LookupByOutput(out->path());
+      if (!log_entry)
+        continue;  // Maybe we'll have a log entry for the next output of this edge?
+      edge->prev_elapsed_time_millis =
+          log_entry->end_time - log_entry->start_time;
+      break;  // Onto next edge.
+    }
+  }
+}
+
 Node* NinjaMain::CollectTarget(const char* cpath, string* err) {
   string path = cpath;
   if (path.empty()) {
@@ -532,7 +546,7 @@
   if (argc == 0) {
     for (vector<Node*>::const_iterator ni = deps_log_.nodes().begin();
          ni != deps_log_.nodes().end(); ++ni) {
-      if (deps_log_.IsDepsEntryLiveFor(*ni))
+      if (DepsLog::IsDepsEntryLiveFor(*ni))
         nodes.push_back(*ni);
     }
   } else {
@@ -672,6 +686,7 @@
       }
     }
     printf("\n");
+    fflush(stdout);
   }
   return 0;
 }
@@ -881,7 +896,10 @@
     return command;
 
   size_t index = command.find(rspfile);
-  if (index == 0 || index == string::npos || command[index - 1] != '@')
+  if (index == 0 || index == string::npos ||
+      (command[index - 1] != '@' &&
+       command.find("--option-file=") != index - 14 &&
+       command.find("-f ") != index - 3))
     return command;
 
   string rspfile_content = edge->GetBinding("rspfile_content");
@@ -891,7 +909,13 @@
     rspfile_content.replace(newline_index, 1, 1, ' ');
     ++newline_index;
   }
-  command.replace(index - 1, rspfile.length() + 1, rspfile_content);
+  if (command[index - 1] == '@') {
+    command.replace(index - 1, rspfile.length() + 1, rspfile_content);
+  } else if (command.find("-f ") == index - 3) {
+    command.replace(index - 3, rspfile.length() + 3, rspfile_content);
+  } else {  // --option-file syntax
+    command.replace(index - 14, rspfile.length() + 14, rspfile_content);
+  }
   return command;
 }
 
@@ -1200,12 +1224,6 @@
 "  phonycycle={err,warn}  phony build statement references itself\n"
     );
     return false;
-  } else if (name == "dupbuild=err") {
-    options->dupe_edges_should_err = true;
-    return true;
-  } else if (name == "dupbuild=warn") {
-    options->dupe_edges_should_err = false;
-    return true;
   } else if (name == "phonycycle=err") {
     options->phony_cycle_should_err = true;
     return true;
@@ -1217,9 +1235,8 @@
     Warning("deprecated warning 'depfilemulti'");
     return true;
   } else {
-    const char* suggestion =
-        SpellcheckString(name.c_str(), "dupbuild=err", "dupbuild=warn",
-                         "phonycycle=err", "phonycycle=warn", NULL);
+    const char* suggestion = SpellcheckString(name.c_str(), "phonycycle=err",
+                                              "phonycycle=warn", nullptr);
     if (suggestion) {
       Error("unknown warning flag '%s', did you mean '%s'?",
             name.c_str(), suggestion);
@@ -1356,7 +1373,9 @@
   disk_interface_.AllowStatCache(false);
 
   if (builder.AlreadyUpToDate()) {
-    status->Info("no work to do.");
+    if (config_.verbosity != BuildConfig::NO_STATUS_UPDATE) {
+      status->Info("no work to do.");
+    }
     return 0;
   }
 
@@ -1400,7 +1419,7 @@
   BuildConfig* config;
 
   DeferGuessParallelism(BuildConfig* config)
-      : config(config), needGuess(true) {}
+      : needGuess(true), config(config) {}
 
   void Refresh() {
     if (needGuess) {
@@ -1513,7 +1532,6 @@
   BuildConfig config;
   Options options = {};
   options.input_file = "build.ninja";
-  options.dupe_edges_should_err = true;
 
   setvbuf(stdout, NULL, _IOLBF, BUFSIZ);
   const char* ninja_command = argv[0];
@@ -1550,9 +1568,6 @@
     NinjaMain ninja(ninja_command, config);
 
     ManifestParserOptions parser_opts;
-    if (options.dupe_edges_should_err) {
-      parser_opts.dupe_edge_action_ = kDupeEdgeActionError;
-    }
     if (options.phony_cycle_should_err) {
       parser_opts.phony_cycle_action_ = kPhonyCycleActionError;
     }
@@ -1588,6 +1603,8 @@
       exit(1);
     }
 
+    ninja.ParsePreviousElapsedTimes();
+
     int result = ninja.RunBuild(argc, argv, status);
     if (g_metrics)
       ninja.DumpMetrics();
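
The compdb response-file expansion now also recognizes "--option-file=<rspfile>" and "-f <rspfile>" alongside the original "@<rspfile>" spelling, using the position of the filename to check the characters immediately before it. A worked example with a hypothetical command line:

    #include <cassert>
    #include <string>

    int main() {
      // Hypothetical values; only the index arithmetic matters here.
      std::string command = "mylinker --option-file=objs.rsp -o out";
      std::string rspfile = "objs.rsp";
      std::string rspfile_content = "a.o b.o c.o";

      size_t index = command.find(rspfile);
      assert(index == 23);
      // strlen("--option-file=") == 14, so the flag must start at index - 14.
      assert(command.find("--option-file=") == index - 14);

      // Replace the whole "--option-file=objs.rsp" span with the contents.
      command.replace(index - 14, rspfile.length() + 14, rspfile_content);
      assert(command == "mylinker a.o b.o c.o -o out");
      return 0;
    }
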
diff --git a/src/ninja_test.cc b/src/ninja_test.cc
index 6720dec..7616c85 100644
--- a/src/ninja_test.cc
+++ b/src/ninja_test.cc
@@ -12,151 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#ifdef _WIN32
-#include "getopt.h"
-#elif defined(_AIX)
-#include "getopt.h"
-#include <unistd.h>
-#else
-#include <getopt.h>
-#endif
-
-#include "test.h"
-#include "line_printer.h"
-
-using namespace std;
-
-struct RegisteredTest {
-  testing::Test* (*factory)();
-  const char *name;
-  bool should_run;
-};
-// This can't be a vector because tests call RegisterTest from static
-// initializers and the order static initializers run it isn't specified. So
-// the vector constructor isn't guaranteed to run before all of the
-// RegisterTest() calls.
-static RegisteredTest tests[10000];
-testing::Test* g_current_test;
-static int ntests;
-static LinePrinter printer;
-
-void RegisterTest(testing::Test* (*factory)(), const char* name) {
-  tests[ntests].factory = factory;
-  tests[ntests++].name = name;
-}
-
-namespace {
-string StringPrintf(const char* format, ...) {
-  const int N = 1024;
-  char buf[N];
-
-  va_list ap;
-  va_start(ap, format);
-  vsnprintf(buf, N, format, ap);
-  va_end(ap);
-
-  return buf;
-}
-
-void Usage() {
-  fprintf(stderr,
-"usage: ninja_tests [options]\n"
-"\n"
-"options:\n"
-"  --gtest_filter=POSITIVE_PATTERN[-NEGATIVE_PATTERN]\n"
-"      Run tests whose names match the positive but not the negative pattern.\n"
-"      '*' matches any substring. (gtest's ':', '?' are not implemented).\n");
-}
-
-bool PatternMatchesString(const char* pattern, const char* str) {
-  switch (*pattern) {
-    case '\0':
-    case '-': return *str == '\0';
-    case '*': return (*str != '\0' && PatternMatchesString(pattern, str + 1)) ||
-                     PatternMatchesString(pattern + 1, str);
-    default:  return *pattern == *str &&
-                     PatternMatchesString(pattern + 1, str + 1);
-  }
-}
-
-bool TestMatchesFilter(const char* test, const char* filter) {
-  // Split --gtest_filter at '-' into positive and negative filters.
-  const char* const dash = strchr(filter, '-');
-  const char* pos = dash == filter ? "*" : filter; //Treat '-test1' as '*-test1'
-  const char* neg = dash ? dash + 1 : "";
-  return PatternMatchesString(pos, test) && !PatternMatchesString(neg, test);
-}
-
-bool ReadFlags(int* argc, char*** argv, const char** test_filter) {
-  enum { OPT_GTEST_FILTER = 1 };
-  const option kLongOptions[] = {
-    { "gtest_filter", required_argument, NULL, OPT_GTEST_FILTER },
-    { NULL, 0, NULL, 0 }
-  };
-
-  int opt;
-  while ((opt = getopt_long(*argc, *argv, "h", kLongOptions, NULL)) != -1) {
-    switch (opt) {
-    case OPT_GTEST_FILTER:
-      if (strchr(optarg, '?') == NULL && strchr(optarg, ':') == NULL) {
-        *test_filter = optarg;
-        break;
-      }  // else fall through.
-    default:
-      Usage();
-      return false;
-    }
-  }
-  *argv += optind;
-  *argc -= optind;
-  return true;
-}
-
-}  // namespace
-
-bool testing::Test::Check(bool condition, const char* file, int line,
-                          const char* error) {
-  if (!condition) {
-    printer.PrintOnNewLine(
-        StringPrintf("*** Failure in %s:%d\n%s\n", file, line, error));
-    failed_ = true;
-  }
-  return condition;
-}
+#include <gtest/gtest.h>
 
 int main(int argc, char **argv) {
-  int tests_started = 0;
-
-  const char* test_filter = "*";
-  if (!ReadFlags(&argc, &argv, &test_filter))
-    return 1;
-
-  int nactivetests = 0;
-  for (int i = 0; i < ntests; i++)
-    if ((tests[i].should_run = TestMatchesFilter(tests[i].name, test_filter)))
-      ++nactivetests;
-
-  bool passed = true;
-  for (int i = 0; i < ntests; i++) {
-    if (!tests[i].should_run) continue;
-
-    ++tests_started;
-    testing::Test* test = tests[i].factory();
-    printer.Print(
-        StringPrintf("[%d/%d] %s", tests_started, nactivetests, tests[i].name),
-        LinePrinter::ELIDE);
-    test->SetUp();
-    test->Run();
-    test->TearDown();
-    if (test->Failed())
-      passed = false;
-    delete test;
-  }
-
-  printer.PrintOnNewLine(passed ? "passed\n" : "failed\n");
-  return passed ? EXIT_SUCCESS : EXIT_FAILURE;
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
 }
diff --git a/src/parser.cc b/src/parser.cc
index 756922d..139a347 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -20,7 +20,10 @@
 using namespace std;
 
 bool Parser::Load(const string& filename, string* err, Lexer* parent) {
-  METRIC_RECORD(".ninja parse");
+  // If |parent| is not NULL, metrics collection has been started by a parent
+  // Parser::Load() in our call stack. Do not start a new one here to avoid
+  // over-counting parsing times.
+  METRIC_RECORD_IF(".ninja parse", parent == NULL);
   string contents;
   string read_err;
   if (file_reader_->ReadFile(filename, &contents, &read_err) !=
@@ -31,13 +34,6 @@
     return false;
   }
 
-  // The lexer needs a nul byte at the end of its input, to know when it's done.
-  // It takes a StringPiece, and StringPiece's string constructor uses
-  // string::data().  data()'s return value isn't guaranteed to be
-  // null-terminated (although in practice - libc++, libstdc++, msvc's stl --
-  // it is, and C++11 demands that too), so add an explicit nul byte.
-  contents.resize(contents.size() + 1);
-
   return Parse(filename, contents, err);
 }
 
diff --git a/src/state.cc b/src/state.cc
index 556b0d8..5fec5e1 100644
--- a/src/state.cc
+++ b/src/state.cc
@@ -38,13 +38,13 @@
   delayed_.insert(edge);
 }
 
-void Pool::RetrieveReadyEdges(EdgeSet* ready_queue) {
+void Pool::RetrieveReadyEdges(EdgePriorityQueue* ready_queue) {
   DelayedEdges::iterator it = delayed_.begin();
   while (it != delayed_.end()) {
     Edge* edge = *it;
     if (current_use_ + edge->weight() > depth_)
       break;
-    ready_queue->insert(edge);
+    ready_queue->push(edge);
     EdgeScheduled(*edge);
     ++it;
   }
@@ -128,16 +128,25 @@
 
 void State::AddIn(Edge* edge, StringPiece path, uint64_t slash_bits) {
   Node* node = GetNode(path, slash_bits);
+  node->set_generated_by_dep_loader(false);
   edge->inputs_.push_back(node);
   node->AddOutEdge(edge);
 }
 
-bool State::AddOut(Edge* edge, StringPiece path, uint64_t slash_bits) {
+bool State::AddOut(Edge* edge, StringPiece path, uint64_t slash_bits,
+                   std::string* err) {
   Node* node = GetNode(path, slash_bits);
-  if (node->in_edge())
+  if (Edge* other = node->in_edge()) {
+    if (other == edge) {
+      *err = path.AsString() + " is defined as an output multiple times";
+    } else {
+      *err = "multiple rules generate " + path.AsString();
+    }
     return false;
+  }
   edge->outputs_.push_back(node);
   node->set_in_edge(edge);
+  node->set_generated_by_dep_loader(false);
   return true;
 }
 
@@ -145,6 +154,7 @@
   Node* node = GetNode(path, slash_bits);
   edge->validations_.push_back(node);
   node->AddValidationOutEdge(edge);
+  node->set_generated_by_dep_loader(false);
 }
 
 bool State::AddDefault(StringPiece path, string* err) {
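
A hedged sketch of how a manifest-parsing caller might surface the new AddOut() error
(the actual call site in manifest_parser.cc is not part of this hunk, and ReportError()
below is a hypothetical helper):

    std::string err;
    if (!state->AddOut(edge, path, slash_bits, &err)) {
      // err is now either "multiple rules generate <path>" or
      // "<path> is defined as an output multiple times".
      return ReportError(err);  // hypothetical error-reporting helper
    }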
diff --git a/src/state.h b/src/state.h
index 878ac6d..8789cb1 100644
--- a/src/state.h
+++ b/src/state.h
@@ -62,7 +62,7 @@
   void DelayEdge(Edge* edge);
 
   /// Pool will add zero or more edges to the ready_queue
-  void RetrieveReadyEdges(EdgeSet* ready_queue);
+  void RetrieveReadyEdges(EdgePriorityQueue* ready_queue);
 
   /// Dump the Pool and its edges (useful for debugging).
   void Dump() const;
@@ -80,7 +80,10 @@
       if (!a) return b;
       if (!b) return false;
       int weight_diff = a->weight() - b->weight();
-      return ((weight_diff < 0) || (weight_diff == 0 && EdgeCmp()(a, b)));
+      if (weight_diff != 0) {
+        return weight_diff < 0;
+      }
+      return EdgePriorityGreater()(a, b);
     }
   };
 
@@ -105,8 +108,11 @@
   Node* LookupNode(StringPiece path) const;
   Node* SpellcheckNode(const std::string& path);
 
+  /// Add input / output / validation nodes to a given edge. This also
+  /// ensures that the generated_by_dep_loader() flag for all these nodes
+  /// is set to false, to indicate that they come from the input manifest.
   void AddIn(Edge* edge, StringPiece path, uint64_t slash_bits);
-  bool AddOut(Edge* edge, StringPiece path, uint64_t slash_bits);
+  bool AddOut(Edge* edge, StringPiece path, uint64_t slash_bits, std::string* err);
   void AddValidation(Edge* edge, StringPiece path, uint64_t slash_bits);
   bool AddDefault(StringPiece path, std::string* error);
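
EdgePriorityQueue itself is declared elsewhere in the tree (graph.h) and is not part of
this diff. Conceptually it replaces the old EdgeSet with a priority queue, so ready
edges come out in priority order via push()/top()/pop(). A hedged sketch of the shape,
with the comparator left abstract since the real ordering lives in graph.h:

    #include <queue>
    #include <vector>

    struct Edge;  // ninja's build-graph edge type

    // Assumed comparator: returns true when |a| should be scheduled after |b|.
    struct EdgePriorityLessSketch {
      bool operator()(const Edge* a, const Edge* b) const;
    };

    // Sketch only: a priority queue of edges, so Pool::RetrieveReadyEdges()
    // can push() ready edges instead of insert()-ing them into a set.
    using EdgePriorityQueueSketch =
        std::priority_queue<Edge*, std::vector<Edge*>, EdgePriorityLessSketch>;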
 
diff --git a/src/state_test.cc b/src/state_test.cc
index 96469f9..e0e3060 100644
--- a/src/state_test.cc
+++ b/src/state_test.cc
@@ -36,7 +36,7 @@
   Edge* edge = state.AddEdge(rule);
   state.AddIn(edge, "in1", 0);
   state.AddIn(edge, "in2", 0);
-  state.AddOut(edge, "out", 0);
+  state.AddOut(edge, "out", 0, nullptr);
 
   EXPECT_EQ("cat in1 in2 > out", edge->EvaluateCommand());
 
diff --git a/src/status.cc b/src/status.cc
index 88b7781..06f3c20 100644
--- a/src/status.cc
+++ b/src/status.cc
@@ -14,6 +14,15 @@
 
 #include "status.h"
 
+#ifdef _WIN32
+#include "win32port.h"
+#else
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include <cinttypes>
+#endif
+
 #include <stdarg.h>
 #include <stdlib.h>
 
@@ -27,11 +36,9 @@
 using namespace std;
 
 StatusPrinter::StatusPrinter(const BuildConfig& config)
-    : config_(config),
-      started_edges_(0), finished_edges_(0), total_edges_(0), running_edges_(0),
-      time_millis_(0), progress_status_format_(NULL),
+    : config_(config), started_edges_(0), finished_edges_(0), total_edges_(0),
+      running_edges_(0), progress_status_format_(NULL),
       current_rate_(config.parallelism) {
-
   // Don't do anything fancy in verbose mode.
   if (config_.verbosity != BuildConfig::NORMAL)
     printer_.set_smart_terminal(false);
@@ -41,8 +48,32 @@
     progress_status_format_ = "[%f/%t] ";
 }
 
-void StatusPrinter::PlanHasTotalEdges(int total) {
-  total_edges_ = total;
+void StatusPrinter::EdgeAddedToPlan(const Edge* edge) {
+  ++total_edges_;
+
+  // Do we know how long this edge took last time?
+  if (edge->prev_elapsed_time_millis != -1) {
+    ++eta_predictable_edges_total_;
+    ++eta_predictable_edges_remaining_;
+    eta_predictable_cpu_time_total_millis_ += edge->prev_elapsed_time_millis;
+    eta_predictable_cpu_time_remaining_millis_ +=
+        edge->prev_elapsed_time_millis;
+  } else
+    ++eta_unpredictable_edges_remaining_;
+}
+
+void StatusPrinter::EdgeRemovedFromPlan(const Edge* edge) {
+  --total_edges_;
+
+  // Do we know how long this edge took last time?
+  if (edge->prev_elapsed_time_millis != -1) {
+    --eta_predictable_edges_total_;
+    --eta_predictable_edges_remaining_;
+    eta_predictable_cpu_time_total_millis_ -= edge->prev_elapsed_time_millis;
+    eta_predictable_cpu_time_remaining_millis_ -=
+        edge->prev_elapsed_time_millis;
+  } else
+    --eta_unpredictable_edges_remaining_;
 }
 
 void StatusPrinter::BuildEdgeStarted(const Edge* edge,
@@ -58,11 +89,102 @@
     printer_.SetConsoleLocked(true);
 }
 
-void StatusPrinter::BuildEdgeFinished(Edge* edge, int64_t end_time_millis,
-                                      bool success, const string& output) {
+void StatusPrinter::RecalculateProgressPrediction() {
+  time_predicted_percentage_ = 0.0;
+
+  // Sometimes, the previous and actual times may be wildly different.
+  // For example, the previous build may have been served entirely from ccache,
+  // so it was blazing fast, while the new build no longer gets ccache hits
+  // for whatever reason, so it actually compiles code, which takes much longer.
+  // We should detect such cases, and avoid using "wrong" previous times.
+
+  // Note that we will only use the previous times if there are remaining
+  // edges whose previous run time is known.
+  bool use_previous_times = eta_predictable_edges_remaining_ &&
+                            eta_predictable_cpu_time_remaining_millis_;
+
+  // Iff we have sufficient statistical information for the current run,
+  // that is, if we have taken at least 15 sec AND finished at least 5% of edges,
+  // we can check whether our performance so far matches the previous one.
+  if (use_previous_times && total_edges_ && finished_edges_ &&
+      (time_millis_ >= 15 * 1e3) &&
+      (((double)finished_edges_ / total_edges_) >= 0.05)) {
+    // Over the edges we've just run, how long did they take on average?
+    double actual_average_cpu_time_millis =
+        (double)cpu_time_millis_ / finished_edges_;
+    // What is the previous average, for the edges with such knowledge?
+    double previous_average_cpu_time_millis =
+        (double)eta_predictable_cpu_time_total_millis_ /
+        eta_predictable_edges_total_;
+
+    double ratio = std::max(previous_average_cpu_time_millis,
+                            actual_average_cpu_time_millis) /
+                   std::min(previous_average_cpu_time_millis,
+                            actual_average_cpu_time_millis);
+
+    // Only use the previous times if the two averages differ by less than 10x.
+    use_previous_times = ratio < 10;
+  }
+
+  int edges_with_known_runtime = finished_edges_;
+  if (use_previous_times)
+    edges_with_known_runtime += eta_predictable_edges_remaining_;
+  if (edges_with_known_runtime == 0)
+    return;
+
+  int edges_with_unknown_runtime = use_previous_times
+                                       ? eta_unpredictable_edges_remaining_
+                                       : (total_edges_ - finished_edges_);
+
+  // Given the time elapsed on the edges we've just run, and the runtime of
+  // the edges for which we know the previous runtime, what is the average
+  // edge runtime?
+  int64_t edges_known_runtime_total_millis = cpu_time_millis_;
+  if (use_previous_times)
+    edges_known_runtime_total_millis +=
+        eta_predictable_cpu_time_remaining_millis_;
+
+  double average_cpu_time_millis =
+      (double)edges_known_runtime_total_millis / edges_with_known_runtime;
+
+  // For the edges for which we do not have the previous runtime,
+  // let's assume that their average runtime is the same as for the other edges,
+  // and we therefore can predict their remaining runtime.
+  double unpredictable_cpu_time_remaining_millis =
+      average_cpu_time_millis * edges_with_unknown_runtime;
+
+  // And therefore we can predict the remaining and total runtimes.
+  double total_cpu_time_remaining_millis =
+      unpredictable_cpu_time_remaining_millis;
+  if (use_previous_times)
+    total_cpu_time_remaining_millis +=
+        eta_predictable_cpu_time_remaining_millis_;
+  double total_cpu_time_millis =
+      cpu_time_millis_ + total_cpu_time_remaining_millis;
+  if (total_cpu_time_millis == 0.0)
+    return;
+
+  // After that we can tell how much work we've completed, in time units.
+  time_predicted_percentage_ = cpu_time_millis_ / total_cpu_time_millis;
+}
+
+void StatusPrinter::BuildEdgeFinished(Edge* edge, int64_t start_time_millis,
+                                      int64_t end_time_millis, bool success,
+                                      const string& output) {
   time_millis_ = end_time_millis;
   ++finished_edges_;
 
+  int64_t elapsed = end_time_millis - start_time_millis;
+  cpu_time_millis_ += elapsed;
+
+  // Do we know how long this edge took last time?
+  if (edge->prev_elapsed_time_millis != -1) {
+    --eta_predictable_edges_remaining_;
+    eta_predictable_cpu_time_remaining_millis_ -=
+        edge->prev_elapsed_time_millis;
+  } else
+    --eta_unpredictable_edges_remaining_;
+
   if (edge->use_console())
     printer_.SetConsoleLocked(false);
 
@@ -201,16 +323,78 @@
         out += buf;
         break;
 
-        // Percentage
+        // Percentage of edges completed
       case 'p': {
-        int percent = (100 * finished_edges_) / total_edges_;
+        int percent = 0;
+        if (finished_edges_ != 0 && total_edges_ != 0)
+          percent = (100 * finished_edges_) / total_edges_;
         snprintf(buf, sizeof(buf), "%3i%%", percent);
         out += buf;
         break;
       }
 
-      case 'e': {
-        snprintf(buf, sizeof(buf), "%.3f", time_millis_ / 1e3);
+#define FORMAT_TIME_HMMSS(t)                                                \
+  "%" PRId64 ":%02" PRId64 ":%02" PRId64 "", (t) / 3600, ((t) % 3600) / 60, \
+      (t) % 60
+#define FORMAT_TIME_MMSS(t) "%02" PRId64 ":%02" PRId64 "", (t) / 60, (t) % 60
+
+        // Wall time
+      case 'e':  // elapsed, seconds
+      case 'w':  // elapsed, human-readable
+      case 'E':  // ETA, seconds
+      case 'W':  // ETA, human-readable
+      {
+        double elapsed_sec = time_millis_ / 1e3;
+        double eta_sec = -1;  // To be printed as "?".
+        if (time_predicted_percentage_ != 0.0) {
+          // We know that we've spent time_millis_ of wall clock time so far,
+          // and that it corresponds to time_predicted_percentage_ of the total.
+          // How much time will we need to complete 100%?
+          double total_wall_time = time_millis_ / time_predicted_percentage_;
+          // Naturally, that gives us the time remaining.
+          eta_sec = (total_wall_time - time_millis_) / 1e3;
+        }
+
+        const bool print_with_hours =
+            elapsed_sec >= 60 * 60 || eta_sec >= 60 * 60;
+
+        double sec = -1;
+        switch (*s) {
+        case 'e':  // elapsed, seconds
+        case 'w':  // elapsed, human-readable
+          sec = elapsed_sec;
+          break;
+        case 'E':  // ETA, seconds
+        case 'W':  // ETA, human-readable
+          sec = eta_sec;
+          break;
+        }
+
+        if (sec < 0)
+          snprintf(buf, sizeof(buf), "?");
+        else {
+          switch (*s) {
+          case 'e':  // elapsed, seconds
+          case 'E':  // ETA, seconds
+            snprintf(buf, sizeof(buf), "%.3f", sec);
+            break;
+          case 'w':  // elapsed, human-readable
+          case 'W':  // ETA, human-readable
+            if (print_with_hours)
+              snprintf(buf, sizeof(buf), FORMAT_TIME_HMMSS((int64_t)sec));
+            else
+              snprintf(buf, sizeof(buf), FORMAT_TIME_MMSS((int64_t)sec));
+            break;
+          }
+        }
+        out += buf;
+        break;
+      }
+
+      // Percentage of time spent out of the predicted time total
+      case 'P': {
+        snprintf(buf, sizeof(buf), "%3i%%",
+                 (int)(100. * time_predicted_percentage_));
         out += buf;
         break;
       }
@@ -232,6 +416,8 @@
       || config_.verbosity == BuildConfig::NO_STATUS_UPDATE)
     return;
 
+  RecalculateProgressPrediction();
+
   bool force_full_command = config_.verbosity == BuildConfig::VERBOSE;
 
   string to_print = edge->GetBinding("description");
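
The new placeholders are consumed through the usual NINJA_STATUS environment variable:
%e and %w print the elapsed wall time (in seconds and as MM:SS / H:MM:SS respectively),
%E and %W print the predicted time remaining in the same two forms, and %P prints the
percentage of the predicted total time already spent. For example, setting NINJA_STATUS
to "[%f/%t %P, elapsed %w, ETA %W] " would produce status lines along the lines of
"[123/400  31%, elapsed 01:05, ETA 02:20] CXX foo.o" (all values illustrative); when no
prediction is available yet, %E and %W print "?".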
diff --git a/src/status.h b/src/status.h
index e211ba3..a1a8fdd 100644
--- a/src/status.h
+++ b/src/status.h
@@ -24,10 +24,13 @@
 /// Abstract interface to object that tracks the status of a build:
 /// completion fraction, printing updates.
 struct Status {
-  virtual void PlanHasTotalEdges(int total) = 0;
-  virtual void BuildEdgeStarted(const Edge* edge, int64_t start_time_millis) = 0;
-  virtual void BuildEdgeFinished(Edge* edge, int64_t end_time_millis,
-                                 bool success, const std::string& output) = 0;
+  virtual void EdgeAddedToPlan(const Edge* edge) = 0;
+  virtual void EdgeRemovedFromPlan(const Edge* edge) = 0;
+  virtual void BuildEdgeStarted(const Edge* edge,
+                                int64_t start_time_millis) = 0;
+  virtual void BuildEdgeFinished(Edge* edge, int64_t start_time_millis,
+                                 int64_t end_time_millis, bool success,
+                                 const std::string& output) = 0;
   virtual void BuildLoadDyndeps() = 0;
   virtual void BuildStarted() = 0;
   virtual void BuildFinished() = 0;
@@ -43,10 +46,15 @@
 /// human-readable strings to stdout
 struct StatusPrinter : Status {
   explicit StatusPrinter(const BuildConfig& config);
-  virtual void PlanHasTotalEdges(int total);
+
+  /// Callbacks for the Plan to notify us about adding/removing Edge's.
+  virtual void EdgeAddedToPlan(const Edge* edge);
+  virtual void EdgeRemovedFromPlan(const Edge* edge);
+
   virtual void BuildEdgeStarted(const Edge* edge, int64_t start_time_millis);
-  virtual void BuildEdgeFinished(Edge* edge, int64_t end_time_millis,
-                                 bool success, const std::string& output);
+  virtual void BuildEdgeFinished(Edge* edge, int64_t start_time_millis,
+                                 int64_t end_time_millis, bool success,
+                                 const std::string& output);
   virtual void BuildLoadDyndeps();
   virtual void BuildStarted();
   virtual void BuildFinished();
@@ -71,7 +79,30 @@
   const BuildConfig& config_;
 
   int started_edges_, finished_edges_, total_edges_, running_edges_;
-  int64_t time_millis_;
+
+  /// How much wall clock time has elapsed so far?
+  int64_t time_millis_ = 0;
+
+  /// How much CPU time has elapsed so far?
+  int64_t cpu_time_millis_ = 0;
+
+  /// What percentage of the predicted total time has elapsed already?
+  double time_predicted_percentage_ = 0.0;
+
+  /// Out of all the edges, for how many do we know the previous run time?
+  int eta_predictable_edges_total_ = 0;
+  /// And how much time did they all take?
+  int64_t eta_predictable_cpu_time_total_millis_ = 0;
+
+  /// Among the non-finished edges, for how many do we know the previous time?
+  int eta_predictable_edges_remaining_ = 0;
+  /// And how much time will they all take?
+  int64_t eta_predictable_cpu_time_remaining_millis_ = 0;
+
+  /// For how many edges do we not know the previous run time?
+  int eta_unpredictable_edges_remaining_ = 0;
+
+  void RecalculateProgressPrediction();
 
   /// Prints progress output.
   LinePrinter printer_;
@@ -92,14 +123,14 @@
 
     double rate() { return rate_; }
 
-    void UpdateRate(int update_hint, int64_t time_millis_) {
+    void UpdateRate(int update_hint, int64_t time_millis) {
       if (update_hint == last_update_)
         return;
       last_update_ = update_hint;
 
       if (times_.size() == N)
         times_.pop();
-      times_.push(time_millis_);
+      times_.push(time_millis);
       if (times_.back() != times_.front())
         rate_ = times_.size() / ((times_.back() - times_.front()) / 1e3);
     }
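
The ETA bookkeeping above boils down to simple proportions. A self-contained sketch of
the arithmetic (values are made up; the variable names mirror the fields above, but
this is not the StatusPrinter code itself):

    #include <cstdio>

    int main() {
      // Suppose 30 s of wall-clock time have passed and the CPU-time-weighted
      // prediction says 25% of the total work is done.
      double time_millis = 30000.0;
      double time_predicted_percentage = 0.25;
      // Then the whole build should take time_millis / percentage of wall clock...
      double total_wall_time_millis = time_millis / time_predicted_percentage;  // 120000
      // ...so the remaining time (what %E / %W print) is the difference.
      double eta_sec = (total_wall_time_millis - time_millis) / 1e3;  // 90 s
      std::printf("ETA: %.3f s\n", eta_sec);
      return 0;
    }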
diff --git a/src/status_test.cc b/src/status_test.cc
index 6e42490..411d3ed 100644
--- a/src/status_test.cc
+++ b/src/status_test.cc
@@ -22,8 +22,9 @@
 
   status.BuildStarted();
   // Before any task is done, the elapsed time must be zero.
-  EXPECT_EQ("[%/e0.000]",
-            status.FormatProgressStatus("[%%/e%e]", 0));
+  EXPECT_EQ("[%/e0.000]", status.FormatProgressStatus("[%%/e%e]", 0));
+  // Likewise, the human-readable elapsed time must start at 00:00.
+  EXPECT_EQ("[%/e00:00]", status.FormatProgressStatus("[%%/e%w]", 0));
 }
 
 TEST(StatusTest, StatusFormatReplacePlaceholder) {
diff --git a/src/test.cc b/src/test.cc
index 11b1c9e..4d063da 100644
--- a/src/test.cc
+++ b/src/test.cc
@@ -235,3 +235,29 @@
 
   temp_dir_name_.clear();
 }
+
+ScopedFilePath::ScopedFilePath(ScopedFilePath&& other) noexcept
+    : path_(std::move(other.path_)), released_(other.released_) {
+  other.released_ = true;
+}
+
+/// It would be nice to use '= default' here instead, but some old compilers,
+/// such as GCC from Ubuntu 16.04, will not compile it with "noexcept", so just
+/// write it manually.
+ScopedFilePath& ScopedFilePath::operator=(ScopedFilePath&& other) noexcept {
+  if (this != &other) {
+    this->~ScopedFilePath();
+    new (this) ScopedFilePath(std::move(other));
+  }
+  return *this;
+}
+
+ScopedFilePath::~ScopedFilePath() {
+  if (!released_) {
+    unlink(path_.c_str());
+  }
+}
+
+void ScopedFilePath::Release() {
+  released_ = true;
+}
diff --git a/src/test.h b/src/test.h
index 4552c34..a4b9e19 100644
--- a/src/test.h
+++ b/src/test.h
@@ -15,94 +15,11 @@
 #ifndef NINJA_TEST_H_
 #define NINJA_TEST_H_
 
+#include <gtest/gtest.h>
+
 #include "disk_interface.h"
 #include "manifest_parser.h"
 #include "state.h"
-#include "util.h"
-
-// A tiny testing framework inspired by googletest, but much simpler and
-// faster to compile. It supports most things commonly used from googltest. The
-// most noticeable things missing: EXPECT_* and ASSERT_* don't support
-// streaming notes to them with operator<<, and for failing tests the lhs and
-// rhs are not printed. That's so that this header does not have to include
-// sstream, which slows down building ninja_test almost 20%.
-namespace testing {
-class Test {
-  bool failed_;
-  int assertion_failures_;
- public:
-  Test() : failed_(false), assertion_failures_(0) {}
-  virtual ~Test() {}
-  virtual void SetUp() {}
-  virtual void TearDown() {}
-  virtual void Run() = 0;
-
-  bool Failed() const { return failed_; }
-  int AssertionFailures() const { return assertion_failures_; }
-  void AddAssertionFailure() { assertion_failures_++; }
-  bool Check(bool condition, const char* file, int line, const char* error);
-};
-}
-
-void RegisterTest(testing::Test* (*)(), const char*);
-
-extern testing::Test* g_current_test;
-#define TEST_F_(x, y, name)                                           \
-  struct y : public x {                                               \
-    static testing::Test* Create() { return g_current_test = new y; } \
-    virtual void Run();                                               \
-  };                                                                  \
-  struct Register##y {                                                \
-    Register##y() { RegisterTest(y::Create, name); }                  \
-  };                                                                  \
-  Register##y g_register_##y;                                         \
-  void y::Run()
-
-#define TEST_F(x, y) TEST_F_(x, x##y, #x "." #y)
-#define TEST(x, y) TEST_F_(testing::Test, x##y, #x "." #y)
-
-#define EXPECT_EQ(a, b) \
-  g_current_test->Check(a == b, __FILE__, __LINE__, #a " == " #b)
-#define EXPECT_NE(a, b) \
-  g_current_test->Check(a != b, __FILE__, __LINE__, #a " != " #b)
-#define EXPECT_GT(a, b) \
-  g_current_test->Check(a > b, __FILE__, __LINE__, #a " > " #b)
-#define EXPECT_LT(a, b) \
-  g_current_test->Check(a < b, __FILE__, __LINE__, #a " < " #b)
-#define EXPECT_GE(a, b) \
-  g_current_test->Check(a >= b, __FILE__, __LINE__, #a " >= " #b)
-#define EXPECT_LE(a, b) \
-  g_current_test->Check(a <= b, __FILE__, __LINE__, #a " <= " #b)
-#define EXPECT_TRUE(a) \
-  g_current_test->Check(static_cast<bool>(a), __FILE__, __LINE__, #a)
-#define EXPECT_FALSE(a) \
-  g_current_test->Check(!static_cast<bool>(a), __FILE__, __LINE__, #a)
-
-#define ASSERT_EQ(a, b) \
-  if (!EXPECT_EQ(a, b)) { g_current_test->AddAssertionFailure(); return; }
-#define ASSERT_NE(a, b) \
-  if (!EXPECT_NE(a, b)) { g_current_test->AddAssertionFailure(); return; }
-#define ASSERT_GT(a, b) \
-  if (!EXPECT_GT(a, b)) { g_current_test->AddAssertionFailure(); return; }
-#define ASSERT_LT(a, b) \
-  if (!EXPECT_LT(a, b)) { g_current_test->AddAssertionFailure(); return; }
-#define ASSERT_GE(a, b) \
-  if (!EXPECT_GE(a, b)) { g_current_test->AddAssertionFailure(); return; }
-#define ASSERT_LE(a, b) \
-  if (!EXPECT_LE(a, b)) { g_current_test->AddAssertionFailure(); return; }
-#define ASSERT_TRUE(a)  \
-  if (!EXPECT_TRUE(a))  { g_current_test->AddAssertionFailure(); return; }
-#define ASSERT_FALSE(a) \
-  if (!EXPECT_FALSE(a)) { g_current_test->AddAssertionFailure(); return; }
-#define ASSERT_NO_FATAL_FAILURE(a)                           \
-  {                                                          \
-    int fail_count = g_current_test->AssertionFailures();    \
-    a;                                                       \
-    if (fail_count != g_current_test->AssertionFailures()) { \
-      g_current_test->AddAssertionFailure();                 \
-      return;                                                \
-    }                                                        \
-  }
 
 // Support utilities for tests.
 
@@ -182,4 +99,31 @@
   std::string temp_dir_name_;
 };
 
+/// A class that records a file path and ensures that it is removed
+/// on destruction. This ensures that tests do not keep stale files in the
+/// current directory where they run, even in case of assertion failure.
+struct ScopedFilePath {
+  /// Constructor just records the file path.
+  ScopedFilePath(const std::string& path) : path_(path) {}
+  ScopedFilePath(const char* path) : path_(path) {}
+
+  /// Allow move operations.
+  ScopedFilePath(ScopedFilePath&&) noexcept;
+  ScopedFilePath& operator=(ScopedFilePath&&) noexcept;
+
+  /// The destructor removes the file, unless Release() was called.
+  ~ScopedFilePath();
+
+  /// Release the file: the destructor will no longer remove it.
+  void Release();
+
+  const char* c_str() const { return path_.c_str(); }
+  const std::string& path() const { return path_; }
+  bool released() const { return released_; }
+
+ private:
+  std::string path_;
+  bool released_ = false;
+};
+
 #endif // NINJA_TEST_H_
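
A sketch of typical ScopedFilePath usage inside a test body (WriteFile() below is a
hypothetical helper, not a ninja API):

    TEST(ExampleTest, CleansUpTempFile) {
      ScopedFilePath scoped_path("example_tmp_file.txt");
      ASSERT_TRUE(WriteFile(scoped_path.c_str()));  // hypothetical helper
      // ...further assertions; even an early ASSERT_* return leaves no stale
      // file behind, because ~ScopedFilePath() unlink()s the path.
      // Call scoped_path.Release() only if the file should outlive the test.
    }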
diff --git a/src/util.cc b/src/util.cc
index ef5f103..7668e33 100644
--- a/src/util.cc
+++ b/src/util.cc
@@ -143,20 +143,19 @@
     return;
   }
 
-  const int kMaxPathComponents = 60;
-  char* components[kMaxPathComponents];
-  int component_count = 0;
-
   char* start = path;
   char* dst = start;
+  char* dst_start = dst;
   const char* src = start;
   const char* end = start + *len;
+  const char* src_next;
 
+  // For absolute paths, skip the leading directory separator
+  // as this one should never be removed from the result.
   if (IsPathSeparator(*src)) {
 #ifdef _WIN32
-
-    // network path starts with //
-    if (*len > 1 && IsPathSeparator(*(src + 1))) {
+    // Windows network path starts with //
+    if (src + 2 <= end && IsPathSeparator(src[1])) {
       src += 2;
       dst += 2;
     } else {
@@ -167,50 +166,126 @@
     ++src;
     ++dst;
 #endif
+    dst_start = dst;
+  } else {
+    // For relative paths, skip any leading ../ components, since these are
+    // quite common when referencing source files in build plans, and skipping
+    // them here makes the loop below faster in general.
+    while (src + 3 <= end && src[0] == '.' && src[1] == '.' &&
+           IsPathSeparator(src[2])) {
+      src += 3;
+      dst += 3;
+    }
   }
 
-  while (src < end) {
-    if (*src == '.') {
-      if (src + 1 == end || IsPathSeparator(src[1])) {
-        // '.' component; eliminate.
-        src += 2;
-        continue;
-      } else if (src[1] == '.' && (src + 2 == end || IsPathSeparator(src[2]))) {
-        // '..' component.  Back up if possible.
-        if (component_count > 0) {
-          dst = components[component_count - 1];
-          src += 3;
-          --component_count;
-        } else {
-          *dst++ = *src++;
-          *dst++ = *src++;
-          *dst++ = *src++;
+  // Loop over all components of the path _except_ the last one, in
+  // order to simplify the loop's code and make it faster.
+  int component_count = 0;
+  char* dst0 = dst;
+  for (; src < end; src = src_next) {
+#ifndef _WIN32
+    // Use memchr() for faster lookups thanks to the optimized C library
+    // implementation. `hyperfine canon_perftest` shows a significant
+    // difference (e.g. 484ms vs 437ms).
+    const char* next_sep =
+        static_cast<const char*>(::memchr(src, '/', end - src));
+    if (!next_sep) {
+      // This is the last component, will be handled out of the loop.
+      break;
+    }
+#else
+    // Need to check for both '/' and '\\' so do not use memchr().
+    // Cannot use strpbrk() because end[0] can be \0 or something else!
+    const char* next_sep = src;
+    while (next_sep != end && !IsPathSeparator(*next_sep))
+      ++next_sep;
+    if (next_sep == end) {
+      // This is the last component, will be handled out of the loop.
+      break;
+    }
+#endif
+    // Position for next loop iteration.
+    src_next = next_sep + 1;
+    // Length of the component, excluding the trailing directory separator.
+    size_t component_len = next_sep - src;
+
+    if (component_len <= 2) {
+      if (component_len == 0) {
+        continue;  // Ignore empty component, e.g. 'foo//bar' -> 'foo/bar'.
+      }
+      if (src[0] == '.') {
+        if (component_len == 1) {
+          continue;  // Ignore '.' component, e.g. './foo' -> 'foo'.
+        } else if (src[1] == '.') {
+          // Process the '..' component if found. Back up if possible.
+          if (component_count > 0) {
+            // Move back to start of previous component.
+            --component_count;
+            while (--dst > dst0 && !IsPathSeparator(dst[-1])) {
+              // nothing to do here, decrement happens before condition check.
+            }
+          } else {
+            dst[0] = '.';
+            dst[1] = '.';
+            dst[2] = src[2];
+            dst += 3;
+          }
+          continue;
         }
-        continue;
       }
     }
-
-    if (IsPathSeparator(*src)) {
-      src++;
-      continue;
-    }
-
-    if (component_count == kMaxPathComponents)
-      Fatal("path has too many components : %s", path);
-    components[component_count] = dst;
     ++component_count;
 
-    while (src != end && !IsPathSeparator(*src))
-      *dst++ = *src++;
-    *dst++ = *src++;  // Copy '/' or final \0 character as well.
+    // Copy or skip component, including trailing directory separator.
+    if (dst != src) {
+      ::memmove(dst, src, src_next - src);
+    }
+    dst += src_next - src;
   }
 
+  // Handle the last component, which does not have a trailing separator.
+  // The logic here is _slightly_ different from the loop above since there
+  // is no trailing directory separator to copy.
+  size_t component_len = end - src;
+  do {
+    if (component_len == 0)
+      break;  // Ignore empty component (e.g. 'foo//' -> 'foo/')
+    if (src[0] == '.') {
+      if (component_len == 1)
+        break;  // Ignore trailing '.' (e.g. 'foo/.' -> 'foo/')
+      if (component_len == 2 && src[1] == '.') {
+        // Handle '..'. Back up if possible.
+        if (component_count > 0) {
+          while (--dst > dst0 && !IsPathSeparator(dst[-1])) {
+            // nothing to do here, decrement happens before condition check.
+          }
+        } else {
+          dst[0] = '.';
+          dst[1] = '.';
+          dst += 2;
+          // No separator to add here.
+        }
+        break;
+      }
+    }
+    // Skip or copy last component, no trailing separator.
+    if (dst != src) {
+      ::memmove(dst, src, component_len);
+    }
+    dst += component_len;
+  } while (0);
+
+  // Remove the trailing path separator, if any, but keep the leading path
+  // separator if there was one (or two, for Windows network paths).
+  if (dst > dst_start && IsPathSeparator(dst[-1]))
+    dst--;
+
   if (dst == start) {
+    // Handle special cases like "aa/.." -> "."
     *dst++ = '.';
-    *dst++ = '\0';
   }
 
-  *len = dst - start - 1;
+  *len = dst - start;  // dst points after the trailing char here.
 #ifdef _WIN32
   uint64_t bits = 0;
   uint64_t bits_mask = 1;
@@ -369,8 +444,13 @@
     return -errno;
   }
 
+#ifdef __USE_LARGEFILE64
+  struct stat64 st;
+  if (fstat64(fileno(f), &st) < 0) {
+#else
   struct stat st;
   if (fstat(fileno(f), &st) < 0) {
+#endif
     err->assign(strerror(errno));
     fclose(f);
     return -errno;
@@ -456,10 +536,17 @@
         FORMAT_MESSAGE_IGNORE_INSERTS,
         NULL,
         err,
-        MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+        MAKELANGID(LANG_ENGLISH, SUBLANG_DEFAULT),
         (char*)&msg_buf,
         0,
         NULL);
+
+  if (msg_buf == nullptr) {
+    char fallback_msg[128] = {0};
+    snprintf(fallback_msg, sizeof(fallback_msg), "GetLastError() = %d", err);
+    return fallback_msg;
+  }
+
   string msg = msg_buf;
   LocalFree(msg_buf);
   return msg;
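
The rewritten canonicalization is easiest to read alongside a couple of concrete
inputs. A small sketch using the string overload exercised by the tests below (the
call signature is the one used in util_test.cc):

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include "util.h"

    int main() {
      uint64_t slash_bits = 0;

      std::string path = "./../";
      CanonicalizePath(&path, &slash_bits);
      assert(path == "..");   // leading '..' components are preserved

      path = "/foo/..";
      CanonicalizePath(&path, &slash_bits);
      assert(path == "/");    // the root is now kept instead of collapsing to ""
      return 0;
    }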
diff --git a/src/util_test.cc b/src/util_test.cc
index d58b170..d76954c 100644
--- a/src/util_test.cc
+++ b/src/util_test.cc
@@ -89,13 +89,57 @@
   EXPECT_EQ("/foo", path);
 #endif
 
+  path = "..";
+  CanonicalizePath(&path);
+  EXPECT_EQ("..", path);
+
+  path = "../";
+  CanonicalizePath(&path);
+  EXPECT_EQ("..", path);
+
+  path = "../foo";
+  CanonicalizePath(&path);
+  EXPECT_EQ("../foo", path);
+
+  path = "../foo/";
+  CanonicalizePath(&path);
+  EXPECT_EQ("../foo", path);
+
+  path = "../..";
+  CanonicalizePath(&path);
+  EXPECT_EQ("../..", path);
+
+  path = "../../";
+  CanonicalizePath(&path);
+  EXPECT_EQ("../..", path);
+
+  path = "./../";
+  CanonicalizePath(&path);
+  EXPECT_EQ("..", path);
+
+  path = "/..";
+  CanonicalizePath(&path);
+  EXPECT_EQ("/..", path);
+
+  path = "/../";
+  CanonicalizePath(&path);
+  EXPECT_EQ("/..", path);
+
+  path = "/../..";
+  CanonicalizePath(&path);
+  EXPECT_EQ("/../..", path);
+
+  path = "/../../";
+  CanonicalizePath(&path);
+  EXPECT_EQ("/../..", path);
+
   path = "/";
   CanonicalizePath(&path);
-  EXPECT_EQ("", path);
+  EXPECT_EQ("/", path);
 
   path = "/foo/..";
   CanonicalizePath(&path);
-  EXPECT_EQ("", path);
+  EXPECT_EQ("/", path);
 
   path = ".";
   CanonicalizePath(&path);
@@ -108,6 +152,10 @@
   path = "foo/..";
   CanonicalizePath(&path);
   EXPECT_EQ(".", path);
+
+  path = "foo/.._bar";
+  CanonicalizePath(&path);
+  EXPECT_EQ("foo/.._bar", path);
 }
 
 #ifdef _WIN32
@@ -171,7 +219,7 @@
 
   path = "\\";
   CanonicalizePath(&path);
-  EXPECT_EQ("", path);
+  EXPECT_EQ("/", path);
 }
 
 TEST(CanonicalizePath, SlashTracking) {
@@ -321,8 +369,53 @@
   EXPECT_EQ(58, std::count(path.begin(), path.end(), '\\'));
   CanonicalizePath(&path, &slash_bits);
   EXPECT_EQ(slash_bits, 0x3ffffffffffffff);
+
+  // More than 60 components is now completely ok too.
+  path =
+      "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+      "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+      "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+      "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+      "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+      "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+      "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+      "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+      "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+      "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+      "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+      "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+      "a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\a\\"
+      "a\\a\\a\\a\\a\\a\\a\\a\\a\\x\\y.h";
+  EXPECT_EQ(218, std::count(path.begin(), path.end(), '\\'));
+  CanonicalizePath(&path, &slash_bits);
+  EXPECT_EQ(slash_bits, 0xffffffffffffffff);
 }
-#endif
+#else   // !_WIN32
+TEST(CanonicalizePath, TooManyComponents) {
+  string path;
+  uint64_t slash_bits;
+
+  // More than 60 components is now completely ok.
+  path =
+      "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/"
+      "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/"
+      "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/"
+      "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/"
+      "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/"
+      "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/"
+      "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/"
+      "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/"
+      "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/"
+      "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/"
+      "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/"
+      "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/"
+      "a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/a/"
+      "a/a/a/a/a/a/a/a/a/x/y.h";
+  EXPECT_EQ(218, std::count(path.begin(), path.end(), '/'));
+  CanonicalizePath(&path, &slash_bits);
+  EXPECT_EQ(slash_bits, 0x0);
+}
+#endif  // !_WIN32
 
 TEST(CanonicalizePath, UpDir) {
   string path, err;
@@ -353,11 +446,13 @@
   EXPECT_EQ(strlen("foo"), len);
   EXPECT_EQ("foo/. bar/.", string(path));
 
+  // Verify that 'foo/../file' gets canonicalized to 'file' without
+  // touching the rest of the string.
   path = "foo/../file bar/.";
   len = strlen("foo/../file");
   CanonicalizePath(&path[0], &len, &unused);
   EXPECT_EQ(strlen("file"), len);
-  EXPECT_EQ("file ./file bar/.", string(path));
+  EXPECT_EQ("file../file bar/.", string(path));
 }
 
 TEST(PathEscaping, TortureTest) {
diff --git a/src/version.cc b/src/version.cc
index 0952488..ca54c37 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -20,7 +20,7 @@
 
 using namespace std;
 
-const char* kNinjaVersion = "1.11.1";
+const char* kNinjaVersion = "1.12.0";
 
 void ParseVersion(const string& version, int* major, int* minor) {
   size_t end = version.find('.');
diff --git a/windows/ninja.manifest b/windows/ninja.manifest
index dab929e..47949dd 100644
--- a/windows/ninja.manifest
+++ b/windows/ninja.manifest
@@ -3,6 +3,7 @@
   <application>
     <windowsSettings>
       <activeCodePage xmlns="http://schemas.microsoft.com/SMI/2019/WindowsSettings">UTF-8</activeCodePage>
+      <longPathAware  xmlns="http://schemas.microsoft.com/SMI/2016/WindowsSettings">true</longPathAware>
     </windowsSettings>
   </application>
 </assembly>