[release] Snap to fa450cd574
Change-Id: I1e7ed089e8583b9aaa14c90bbb6a5597d0a78e11
diff --git a/LICENSE b/LICENSE
index 6b55e08..f839b1d 100644
--- a/LICENSE
+++ b/LICENSE
@@ -10,9 +10,6 @@
 copyright notice, this list of conditions and the following disclaimer
 in the documentation and/or other materials provided with the
 distribution.
-   * Neither the name of Google LLC. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
 
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
diff --git a/build/BUILD.gn b/build/BUILD.gn
index dd3f86f..c89126b 100644
--- a/build/BUILD.gn
+++ b/build/BUILD.gn
@@ -697,18 +697,7 @@
     "//src/sys/gtest_v1_runner/*",
     "//src/sys/gtest_v1_runner/tests/*",
     "//src/sys/installer/*",
-    "//src/sys/lib/cm_fidl_validator/*",
-    "//src/sys/lib/cm_json/*",
-    "//src/sys/lib/cm_rust/*",
-    "//src/sys/lib/cm_types/*",
-    "//src/sys/lib/component_id_index/*",
-    "//src/sys/lib/fidl-connector/*",
-    "//src/sys/lib/fidl-fuchsia-pkg-ext/*",
-    "//src/sys/lib/fidl-fuchsia-pkg-rewrite-ext/*",
-    "//src/sys/lib/fuchsia-bootfs/*",
-    "//src/sys/lib/fuchsia_backoff/*",
     "//src/sys/lib/library_loader/*",
-    "//src/sys/lib/runner/*",
     "//src/sys/locate/*",
     "//src/sys/pkg/bin/amber/*",
     "//src/sys/pkg/bin/fake-channel-control/*",
diff --git a/build/go/go_build.gni b/build/go/go_build.gni
index d96bceb..ed5c90c 100644
--- a/build/go/go_build.gni
+++ b/build/go/go_build.gni
@@ -121,6 +121,9 @@
     target_name = main_target_name
     _variant_shared = false
 
+    if (carchive) {
+    }
+
     forward_variables_from(invoker,
                            [
                              "testonly",
@@ -254,14 +257,36 @@
 
     if (carchive) {
       args += [ "--buildmode=c-archive" ]
+
+      # carchive is only set when building Go fuzzers as static libraries
+      # that are then linked into fuzzer executables. Make sure the archive
+      # is built in the same build variant as those executables, to ensure
+      # it grabs the right dependencies.
+      variant_selector_target_type = "fuzzed_executable"
     }
 
     if (is_fuchsia) {
+      # Inject a dependency on libfdio.so. Note that, as a special case,
+      # when building fuzzing binaries, this library should be built in
+      # a non-fuzzing variant (because the fuzzing runtime depends on it),
+      # so compute the correct toolchain for it directly here.
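+      # For example, assuming a current_toolchain label such as
+      # //build/toolchain/fuchsia:x64-asan-fuzzer, this yields
+      # //build/toolchain/fuchsia:x64-asan-shared.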
+      _fdio_toolchain =
+          string_replace(current_toolchain, "-fuzzer", "") + "-shared"
+      _fdio_label_with_toolchain = "//sdk/lib/fdio($_fdio_toolchain)"
+
       deps += [
-        "//sdk/lib/fdio",
         "//zircon/public/sysroot:go_binary_deps",
+        _fdio_label_with_toolchain,
       ]
 
+      if (_fdio_toolchain != current_toolchain + "-shared") {
+        args += [
+          "--lib-dir",
+          rebase_path(
+              get_label_info(_fdio_label_with_toolchain, "root_out_dir")),
+        ]
+      }
+
       if (!carchive && output_breakpad_syms && host_os != "mac") {
         args += [
           "--dump-syms",
diff --git a/build/go/go_fuzzer.gni b/build/go/go_fuzzer.gni
index cae0275..fbcbcfa 100644
--- a/build/go/go_fuzzer.gni
+++ b/build/go/go_fuzzer.gni
@@ -93,9 +93,7 @@
       disable_syslog_backend = true
     }
 
-    # As noted in go_build.gni, Go libraries do not generate distinct outputs for each variants,
-    # but always use the output of the base toolchain.
-    deps = [ ":${wrapper_name}(${toolchain_variant.base})" ]
+    deps = [ ":$wrapper_name" ]
     base_root_out_dir = get_label_info(deps[0], "root_out_dir")
     base_library_name = get_label_info(deps[0], "name")
     libs = [ "$base_root_out_dir/${base_library_name}.a" ]
diff --git a/build/images/recovery/sysmgr-eng-golden.json b/build/images/recovery/sysmgr-eng-golden.json
index 3c3ed4d3f..ccda79c 100644
--- a/build/images/recovery/sysmgr-eng-golden.json
+++ b/build/images/recovery/sysmgr-eng-golden.json
@@ -1,7 +1,8 @@
 {
   "apps": [
     "fuchsia-pkg://fuchsia.com/netcfg#meta/netcfg.cmx",
-    "fuchsia-pkg://fuchsia.com/system_recovery#meta/system_recovery.cmx"
+    "fuchsia-pkg://fuchsia.com/system_recovery#meta/system_recovery.cmx",
+    "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx"
   ],
   "services": {
     "fuchsia.net.NameLookup": "fuchsia-pkg://fuchsia.com/dns-resolver#meta/dns-resolver.cmx",
@@ -23,7 +24,6 @@
     "fuchsia.posix.socket.Provider": "fuchsia-pkg://fuchsia.com/netstack#meta/netstack.cmx",
     "fuchsia.stash.SecureStore": "fuchsia-pkg://fuchsia.com/stash#meta/stash_secure.cmx",
     "fuchsia.sysmem.Allocator": "fuchsia-pkg://fuchsia.com/sysmem_connector#meta/sysmem_connector.cmx",
-    "fuchsia.time.Utc": "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx",
     "fuchsia.wlan.device.service.DeviceService": "fuchsia-pkg://fuchsia.com/wlanstack#meta/wlanstack.cmx",
     "fuchsia.wlan.policy.AccessPointListener": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
     "fuchsia.wlan.policy.AccessPointProvider": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
@@ -40,8 +40,7 @@
     "fuchsia.wlan.policy.AccessPointProvider",
     "fuchsia.wlan.policy.AccessPointListener",
     "fuchsia.wlan.product.deprecatedclient.DeprecatedClient",
-    "fuchsia.wlan.product.deprecatedconfiguration.DeprecatedConfigurator",
-    "fuchsia.time.Utc"
+    "fuchsia.wlan.product.deprecatedconfiguration.DeprecatedConfigurator"
   ],
   "update_dependencies": [
     "fuchsia.posix.socket.Provider",
diff --git a/build/images/recovery/sysmgr-fdr-golden.json b/build/images/recovery/sysmgr-fdr-golden.json
index 3c3ed4d3f..ccda79c 100644
--- a/build/images/recovery/sysmgr-fdr-golden.json
+++ b/build/images/recovery/sysmgr-fdr-golden.json
@@ -1,7 +1,8 @@
 {
   "apps": [
     "fuchsia-pkg://fuchsia.com/netcfg#meta/netcfg.cmx",
-    "fuchsia-pkg://fuchsia.com/system_recovery#meta/system_recovery.cmx"
+    "fuchsia-pkg://fuchsia.com/system_recovery#meta/system_recovery.cmx",
+    "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx"
   ],
   "services": {
     "fuchsia.net.NameLookup": "fuchsia-pkg://fuchsia.com/dns-resolver#meta/dns-resolver.cmx",
@@ -23,7 +24,6 @@
     "fuchsia.posix.socket.Provider": "fuchsia-pkg://fuchsia.com/netstack#meta/netstack.cmx",
     "fuchsia.stash.SecureStore": "fuchsia-pkg://fuchsia.com/stash#meta/stash_secure.cmx",
     "fuchsia.sysmem.Allocator": "fuchsia-pkg://fuchsia.com/sysmem_connector#meta/sysmem_connector.cmx",
-    "fuchsia.time.Utc": "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx",
     "fuchsia.wlan.device.service.DeviceService": "fuchsia-pkg://fuchsia.com/wlanstack#meta/wlanstack.cmx",
     "fuchsia.wlan.policy.AccessPointListener": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
     "fuchsia.wlan.policy.AccessPointProvider": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
@@ -40,8 +40,7 @@
     "fuchsia.wlan.policy.AccessPointProvider",
     "fuchsia.wlan.policy.AccessPointListener",
     "fuchsia.wlan.product.deprecatedclient.DeprecatedClient",
-    "fuchsia.wlan.product.deprecatedconfiguration.DeprecatedConfigurator",
-    "fuchsia.time.Utc"
+    "fuchsia.wlan.product.deprecatedconfiguration.DeprecatedConfigurator"
   ],
   "update_dependencies": [
     "fuchsia.posix.socket.Provider",
diff --git a/build/testing/environments.gni b/build/testing/environments.gni
index 5b13a9b..809477b 100644
--- a/build/testing/environments.gni
+++ b/build/testing/environments.gni
@@ -12,6 +12,9 @@
 
   # The list of environment names to include in "basic_envs".
   basic_env_names = [ "emu" ]
+
+  # Adds GCE as a basic env if true.
+  enable_test_on_gce = false
 }
 
 _all_named_envs = []
@@ -145,6 +148,22 @@
   },
 ]
 
+if (enable_test_on_gce) {
+  basic_env_names += [ "gce" ]
+  gce_env = {
+    dimensions = {
+      device_type = "GCE"
+    }
+  }
+
+  _all_named_envs += [
+    {
+      name = "gce"
+      env = gce_env
+    },
+  ]
+}
+
 ### END Individual test environments
 
 ### Aliases ###
diff --git a/build/testing/platforms.gni b/build/testing/platforms.gni
index 175308b..231072f 100644
--- a/build/testing/platforms.gni
+++ b/build/testing/platforms.gni
@@ -110,4 +110,10 @@
     device_type = "Vim3"
     cpu = "arm64"
   },
+
+  # fxbug.dev/9127
+  {
+    device_type = "GCE"
+    cpu = "x64"
+  },
 ]
diff --git a/build/testing/test_spec.gni b/build/testing/test_spec.gni
index abe35e7..37f1a5fe 100644
--- a/build/testing/test_spec.gni
+++ b/build/testing/test_spec.gni
@@ -244,6 +244,7 @@
       "AEMU",
       "QEMU",
       "Intel NUC Kit NUC7i5DNHE",
+      "GCE",
     ]
   } else if (board_name == "qemu-arm64") {
     allowed_device_types = [ "QEMU" ]
diff --git a/build/zbi/zbi.gni b/build/zbi/zbi.gni
index 7fd5af7..abca11d 100644
--- a/build/zbi/zbi.gni
+++ b/build/zbi/zbi.gni
@@ -53,7 +53,7 @@
 #     Type: string
 #     Default: target_name
 #
-#   deps, data_deps, testonly, assert_no_deps, visibility
+#   deps, data_deps, testonly, assert_no_deps, metadata, visibility
 #     See `gn help`.
 template("zbi") {
   main_target = target_name
@@ -170,21 +170,6 @@
       args += [ "--compressed=$compress" ]
     }
 
-    # This templates merges the metadata.images scopes provided by the caller
-    # with th edefault values computed below. Prepare this operation here.
-    if (defined(invoker.metadata)) {
-      _metadata = invoker.metadata
-      if (defined(_metadata.images)) {
-        _metadata_images = _metadata.images
-      }
-    }
-    if (!defined(_metadata_images)) {
-      _metadata_images = [
-        {
-        },
-      ]
-    }
-
     metadata = {
       images = []
       migrated_zbi_input_args = []
@@ -193,31 +178,26 @@
       # input, but not this ZBI's inputs.
       migrated_zbi_barrier = []
 
-      if (defined(_metadata)) {
-        forward_variables_from(_metadata, "*", [ "images" ])
+      if (defined(invoker.metadata)) {
+        forward_variables_from(invoker.metadata, "*")
       }
 
       # For the //:images build_api_module().
-      foreach(_images_entry, _metadata_images) {
-        images += [
-          {
-            label = get_label_info(":$target_name", "label_with_toolchain")
-            name = _output_name
-            path = rebase_path(output_file, root_build_dir)
-            type = "zbi"
-            cpu = cpu
-            compressed =
-                !defined(invoker.compress) ||
-                (invoker.compress != false && invoker.compress != "none")
-            if (defined(testonly) && testonly) {
-              testonly = true
-            }
-            forward_variables_from(invoker, [ "tags" ])
-
-            forward_variables_from(_images_entry, "*")
-          },
-        ]
-      }
+      images += [
+        {
+          label = get_label_info(":$target_name", "label_with_toolchain")
+          name = _output_name
+          path = rebase_path(output_file, root_build_dir)
+          type = "zbi"
+          cpu = cpu
+          compressed = !defined(invoker.compress) ||
+                       (invoker.compress != false && invoker.compress != "none")
+          if (defined(testonly) && testonly) {
+            testonly = true
+          }
+          forward_variables_from(invoker, [ "tags" ])
+        },
+      ]
 
       # Provide metadata so that a zbi() target can also act as if it were a
       # zbi_input() with `type = "zbi"` and $sources of this target's $outputs.
diff --git a/docs/development/components/build.md b/docs/development/components/build.md
index 994bd8a3..5a1bef6 100644
--- a/docs/development/components/build.md
+++ b/docs/development/components/build.md
@@ -206,7 +206,7 @@
    }
    ```
 
-  * {Dart}
+   * {Dart}
 
    ```gn
    import("//build/dart/dart_component.gni")
@@ -682,6 +682,70 @@
    }
    ```
 
+### Packages with a single component {#packages-with-single-component}
+
+Developers often define a package that contains a single component.
+The template below fuses together `fuchsia_package()` and `fuchsia_component()`
+as a convenience.
+
+   * {C++}
+
+   ```gn
+   import("//src/sys/build/components.gni")
+
+   executable("rot13_encoder_decoder") {
+     sources = [ "rot13_encoder_decoder.cc" ]
+   }
+
+   fuchsia_package_with_single_component("rot13") {
+     manifest = "meta/rot13.cmx"
+     deps = [ ":rot13_encoder_decoder" ]
+   }
+   ```
+
+   * {Rust}
+
+   ```gn
+   import("//build/rust/rustc_binary.gni")
+   import("//src/sys/build/components.gni")
+
+   rustc_binary("rot13_encoder_decoder") {
+   }
+
+   fuchsia_package_with_single_component("rot13") {
+     manifest = "meta/rot13.cmx"
+     deps = [ ":rot13_encoder_decoder" ]
+   }
+   ```
+
+   * {Go}
+
+   ```gn
+   import("//build/go/go_binary.gni")
+   import("//src/sys/build/components.gni")
+
+   go_binary("rot13_encoder_decoder") {
+   }
+
+   fuchsia_component("rot13") {
+     manifest = "meta/rot13.cmx"
+     deps = [ ":rot13_encoder_decoder" ]
+   }
+   ```
+
+Packages are units of distribution. It is beneficial to define multiple
+components in the same package if you need to guarantee that several
+components are always co-present, or if you'd like to be able to update
+several components at once (by updating a single package).
+
+This pattern is also commonly used to create hermetic integration tests.
+For instance, an integration test between two components, where one is a
+client of a service implemented by the other, would include both the client
+and server components in the same package, as sketched below.
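+
+A minimal sketch of this pattern, assuming hypothetical `echo_client` and
+`echo_server` component targets, binaries, and manifests:
+
+```gn
+import("//src/sys/build/components.gni")
+
+fuchsia_component("echo_client") {
+  manifest = "meta/echo_client.cmx"
+  deps = [ ":echo_client_bin" ]
+}
+
+fuchsia_component("echo_server") {
+  manifest = "meta/echo_server.cmx"
+  deps = [ ":echo_server_bin" ]
+}
+
+# A single package guarantees that both components are always co-present
+# and are updated together.
+fuchsia_package("echo-integration-test") {
+  testonly = true
+  deps = [
+    ":echo_client",
+    ":echo_server",
+  ]
+}
+```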
+
+However, if you're developing a package with just a single component, this
+template will save you some boilerplate.
+
 ## Test-driven development
 
 The `fx smoke-test` command automatically detects all tests that are known to
diff --git a/examples/diagnostics/logs/BUILD.gn b/examples/diagnostics/logs/BUILD.gn
index 4b48e38..83e6ae5 100644
--- a/examples/diagnostics/logs/BUILD.gn
+++ b/examples/diagnostics/logs/BUILD.gn
@@ -6,11 +6,15 @@
   testonly = true
   deps = [
     ":tests",
+    "ansi-junk",
     "rust",
   ]
 }
 
 group("tests") {
   testonly = true
-  data_deps = [ "rust:tests" ]
+  deps = [
+    "ansi-junk:tests",
+    "rust:tests",
+  ]
 }
diff --git a/examples/diagnostics/logs/ansi-junk/BUILD.gn b/examples/diagnostics/logs/ansi-junk/BUILD.gn
new file mode 100644
index 0000000..da97147
--- /dev/null
+++ b/examples/diagnostics/logs/ansi-junk/BUILD.gn
@@ -0,0 +1,54 @@
+# Copyright 2020 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/rust/rustc_binary.gni")
+import("//src/sys/build/components.gni")
+
+group("ansi-junk") {
+  testonly = true
+  deps = [
+    ":package",
+    ":tests",
+  ]
+}
+
+group("tests") {
+  testonly = true
+  deps = [ ":ansi-junk-unittests" ]
+}
+
+rustc_binary("bin") {
+  name = "ansi-junk"
+
+  # Generates a GN target for unit-tests with the label `bin_test`, and
+  # a binary named `ansi_junk_bin_test`.
+  with_unit_tests = true
+
+  deps = [
+    "//src/lib/fuchsia",
+    "//src/lib/fuchsia-async",
+    "//src/lib/fuchsia-component",
+    "//third_party/rust_crates:anyhow",
+    "//third_party/rust_crates:tracing",
+  ]
+
+  sources = [ "src/main.rs" ]
+}
+
+fuchsia_component("component") {
+  component_name = "ansi-junk"
+  manifest = "meta/ansi-junk.cmx"
+  deps = [ ":bin" ]
+}
+
+fuchsia_package("package") {
+  package_name = "ansi-junk"
+  deps = [ ":component" ]
+}
+
+# Run with `fx test ansi-junk-unittests`.
+fuchsia_unittest_package("ansi-junk-unittests") {
+  executable_path = "bin/ansi_junk_bin_test"
+  deps = [ ":bin_test" ]
+}
diff --git a/examples/diagnostics/logs/ansi-junk/README.md b/examples/diagnostics/logs/ansi-junk/README.md
new file mode 100644
index 0000000..69c38b6
--- /dev/null
+++ b/examples/diagnostics/logs/ansi-junk/README.md
@@ -0,0 +1,25 @@
+# ansi-junk
+
+A proof-of-concept demonstration of how ANSI escape codes can be "leaked" by logging tools across
+multiple messages when rendered with `log_listener` in a terminal..
+
+## Building
+
+To add this component to your build, append
+`--with examples/diagnostics/logs/ansi-junk`
+to the `fx set` invocation.
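+
+For example, assuming the common `core.x64` product/board configuration:
+
+```
+$ fx set core.x64 --with examples/diagnostics/logs/ansi-junk
+```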
+
+## Running
+
+```
+$ fx shell run fuchsia-pkg://fuchsia.com/ansi-junk#meta/ansi-junk.cmx
+```
+
+## Testing
+
+Unit tests for ansi-junk are available in the `ansi-junk-unittests`
+package.
+
+```
+$ fx test ansi-junk-unittests
+```
diff --git a/src/sys/lib/component_id_index/meta/component_id_index_tests.cmx b/examples/diagnostics/logs/ansi-junk/meta/ansi-junk.cmx
similarity index 67%
rename from src/sys/lib/component_id_index/meta/component_id_index_tests.cmx
rename to examples/diagnostics/logs/ansi-junk/meta/ansi-junk.cmx
index af5d0ec..6ebf836 100644
--- a/src/sys/lib/component_id_index/meta/component_id_index_tests.cmx
+++ b/examples/diagnostics/logs/ansi-junk/meta/ansi-junk.cmx
@@ -3,6 +3,6 @@
         "sdk/lib/diagnostics/syslog/client.shard.cmx"
     ],
     "program": {
-        "binary": "test/component_id_index_tests"
+        "binary": "bin/ansi_junk"
     }
 }
diff --git a/examples/diagnostics/logs/ansi-junk/src/main.rs b/examples/diagnostics/logs/ansi-junk/src/main.rs
new file mode 100644
index 0000000..b05efadc
--- /dev/null
+++ b/examples/diagnostics/logs/ansi-junk/src/main.rs
@@ -0,0 +1,19 @@
+// Copyright 2020 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#[fuchsia::component]
+async fn main() -> Result<(), anyhow::Error> {
+    tracing::info!("Initialized.");
+    tracing::info!("Bell: \x1b[\x07");
+    tracing::info!("\x1b[32;1mstart green");
+    Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+    #[fuchsia::test]
+    async fn smoke_test() {
+        assert!(true);
+    }
+}
diff --git a/garnet/bin/run_test_component/test_metadata.cc b/garnet/bin/run_test_component/test_metadata.cc
index 08d10dc..46aaec9 100644
--- a/garnet/bin/run_test_component/test_metadata.cc
+++ b/garnet/bin/run_test_component/test_metadata.cc
@@ -81,7 +81,6 @@
     fuchsia::sys::test::CacheControl::Name_,
     fuchsia::sysinfo::SysInfo::Name_,
     fuchsia::sysmem::Allocator::Name_,
-    fuchsia::time::Utc::Name_,
     fuchsia::tracing::provider::Registry::Name_,
     fuchsia::tracing::kernel::Controller::Name_,
     fuchsia::tracing::kernel::Reader::Name_,
diff --git a/scripts/fxtest/lib/cmd.dart b/scripts/fxtest/lib/cmd.dart
index e387ce3..e266b8f 100644
--- a/scripts/fxtest/lib/cmd.dart
+++ b/scripts/fxtest/lib/cmd.dart
@@ -154,13 +154,17 @@
     }
 
     if (testsConfig.flags.shouldRebuild) {
-      Set<String> buildTargets =
-          TestBundle.calculateMinimalBuildTargets(parsedManifest.testBundles);
+      Set<String> buildArgs = TestBundle.calculateMinimalBuildTargets(
+          testsConfig, parsedManifest.testBundles);
+      // If all tests in the bundle are host-based, skip the Zircon build.
+      if (buildArgs.isNotEmpty &&
+          !TestBundle.hasDeviceTests(parsedManifest.testBundles)) {
+        buildArgs.add('--no-zircon');
+      }
       emitEvent(TestInfo(testsConfig.wrapWith(
-          '> fx build ${buildTargets?.join(' ') ?? ''}', [green, styleBold])));
+          '> fx build ${buildArgs?.join(' ') ?? ''}', [green, styleBold])));
       try {
-        await fxCommandRun(
-            testsConfig.fxEnv.fx, 'build', buildTargets?.toList());
+        await fxCommandRun(testsConfig.fxEnv.fx, 'build', buildArgs?.toList());
       } on FxRunException {
         emitEvent(FatalError(
             '\'fx test\' could not perform a successful build. Try to run \'fx build\' manually or use the \'--no-build\' flag'));
@@ -262,14 +266,43 @@
         workingDirectory: testsConfig.fxEnv.outputDir,
       );
 
+  bool _maybeAddPackageHash(
+      TestBundle testBundle, PackageRepository repository) {
+    if (testsConfig.flags.shouldUsePackageHash &&
+        testBundle.testDefinition.packageUrl != null) {
+      if (repository == null) {
+        emitEvent(TestResult(
+            runtime: Duration(seconds: 0),
+            exitCode: failureExitCode,
+            message:
+                'Package repository is not available. Run "fx serve-updates" again or use the "--no-use-package-hash" flag.',
+            testName: testBundle.testDefinition.name));
+        return false;
+      } else {
+        String packageName = testBundle.testDefinition.packageUrl.packageName;
+        if (repository[packageName] == null) {
+          emitEvent(TestResult(
+              runtime: Duration(seconds: 0),
+              exitCode: failureExitCode,
+              message:
+                  'Package $packageName is not in the package repository, check if it was correctly built or use the "--no-use-package-hash" flag.',
+              testName: testBundle.testDefinition.name));
+          return false;
+        }
+        testBundle.testDefinition.hash = repository[packageName].merkle;
+      }
+    }
+    return true;
+  }
+
   Future<void> runTests(List<TestBundle> testBundles) async {
     // Enforce a limit
     var _testBundles = testsConfig.flags.limit > 0 &&
             testsConfig.flags.limit < testBundles.length
         ? testBundles.sublist(0, testsConfig.flags.limit)
         : testBundles;
-
-    if (!await checklist.isDeviceReady(_testBundles)) {
+    if (!testsConfig.flags.infoOnly &&
+        !await checklist.isDeviceReady(_testBundles)) {
       emitEvent(FatalError('Device is not ready for running device tests'));
       _exitCodeSetter(failureExitCode);
       return;
@@ -287,10 +320,10 @@
     }
 
     for (TestBundle testBundle in _testBundles) {
-      if (packageRepository != null &&
-          testBundle.testDefinition.packageUrl != null) {
-        String packageName = testBundle.testDefinition.packageUrl.packageName;
-        testBundle.testDefinition.hash = packageRepository[packageName].merkle;
+      if (!testsConfig.flags.infoOnly &&
+          !_maybeAddPackageHash(testBundle, packageRepository)) {
+        _exitCodeSetter(failureExitCode);
+        continue;
       }
       await testBundle.run().forEach((TestEvent event) {
         emitEvent(event);
diff --git a/scripts/fxtest/lib/output/output_formatter.dart b/scripts/fxtest/lib/output/output_formatter.dart
index cd91be8..d790ebd 100644
--- a/scripts/fxtest/lib/output/output_formatter.dart
+++ b/scripts/fxtest/lib/output/output_formatter.dart
@@ -43,7 +43,7 @@
     @required this.wrapWith,
     this.simpleOutput = false,
     OutputBuffer buffer,
-  })  : buffer = buffer ?? OutputBuffer.realIO();
+  }) : buffer = buffer ?? OutputBuffer.realIO();
 
   factory OutputFormatter.fromConfig(
     TestsConfig testsConfig, {
@@ -72,6 +72,9 @@
   String get suiteExecutionTime => _getExecutionTime(_testSuiteStartTime);
 
   String _getExecutionTime(DateTime _startTime) {
+    if (_startTime == null) {
+      return '-';
+    }
     Duration elapsedTime = DateTime.now().difference(_startTime);
     String minutes = elapsedTime.inMinutes.toString().padLeft(2, '0');
     String seconds = (elapsedTime.inSeconds % 60).toString().padLeft(2, '0');
@@ -171,7 +174,9 @@
   void _handleTestInfo(TestInfo event);
 
   /// Handler for fatal errors.
-  void _handleFatalError(FatalError event);
+  void _handleFatalError(FatalError event) {
+    buffer.addLine(wrapWith(event.message, [red]));
+  }
 
   /// Handler for the stream of stdout and stderr content produced by running
   /// tests.
@@ -333,11 +338,6 @@
     }
   }
 
-  @override
-  void _handleFatalError(FatalError event) {
-    buffer.addLine(wrapWith(event.message, [red]));
-  }
-
   void _finalizeLastTestLine() {
     if (!cleanEndOfOutput) return;
     var verboseHint = wrapWith(
@@ -375,8 +375,6 @@
   @override
   void _handleTestInfo(TestInfo event) {}
   @override
-  void _handleFatalError(FatalError event) {}
-  @override
   void _handleTestStarted(TestStarted event) {
     buffer.addLines([
       ...infoPrint(event.testDefinition),
diff --git a/scripts/fxtest/lib/test_bundle.dart b/scripts/fxtest/lib/test_bundle.dart
index 26b6f2f..1f5ca41 100644
--- a/scripts/fxtest/lib/test_bundle.dart
+++ b/scripts/fxtest/lib/test_bundle.dart
@@ -6,6 +6,7 @@
 import 'dart:io';
 
 import 'package:fxtest/fxtest.dart';
+import 'package:fxutils/fxutils.dart' as fxutils;
 import 'package:path/path.dart' as p;
 import 'package:meta/meta.dart';
 
@@ -75,13 +76,21 @@
   /// Calculate the minimal set of build targets based on tests in [testBundles]
   /// Returns null for a full build.
   static Set<String> calculateMinimalBuildTargets(
-      List<TestBundle> testBundles) {
+      TestsConfig testsConfig, List<TestBundle> testBundles) {
     Set<String> targets = {};
     for (var e in testBundles) {
       switch (e.testDefinition.testType) {
         case TestType.component:
         case TestType.suite:
-          targets.add('updates');
+          String target = 'updates';
+          if (testsConfig.fxEnv.isFeatureEnabled('incremental')) {
+            if (e.testDefinition.packageLabel.isNotEmpty) {
+              target = fxutils.getBuildTarget(e.testDefinition.packageLabel);
+            } else if (e.testDefinition.label.isNotEmpty) {
+              target = fxutils.getBuildTarget(e.testDefinition.label);
+            }
+          }
+          targets.add(target);
           break;
         case TestType.command:
         case TestType.host:
diff --git a/scripts/fxtest/lib/test_definition.dart b/scripts/fxtest/lib/test_definition.dart
index 9a4c771..f73f2f3 100644
--- a/scripts/fxtest/lib/test_definition.dart
+++ b/scripts/fxtest/lib/test_definition.dart
@@ -18,6 +18,7 @@
   final String runtimeDeps;
   final String path;
   final String label;
+  final String packageLabel;
   final String name;
   final String os;
   final PackageUrl packageUrl;
@@ -37,6 +38,7 @@
     this.command,
     this.runtimeDeps,
     this.label,
+    this.packageLabel,
     this.path,
     this.maxLogSeverity,
     this.parallel,
@@ -60,6 +62,7 @@
       cpu: testDetails['cpu'] ?? '',
       runtimeDeps: testDetails['runtime_deps'] ?? '',
       label: testDetails['label'] ?? '',
+      packageLabel: testDetails['package_label'] ?? '',
       name: testDetails['name'] ?? '',
       os: testDetails['os'] ?? '',
       packageUrl: testDetails['package_url'] == null
@@ -78,6 +81,7 @@
   command: ${(command ?? []).join(" ")}
   deps_file: $runtimeDeps
   label: $label
+  package_label: ${packageLabel ?? ''}
   package_url: ${packageUrl ?? ''}
   path: $path
   name: $name
diff --git a/scripts/fxtest/test/setup_test.dart b/scripts/fxtest/test/setup_test.dart
index 9c7a2e5..7ffb090 100644
--- a/scripts/fxtest/test/setup_test.dart
+++ b/scripts/fxtest/test/setup_test.dart
@@ -391,13 +391,14 @@
   });
 
   group('build targets', () {
+    var testsConfig = TestsConfig.fromRawArgs(
+      rawArgs: [],
+      fxEnv: FakeFxEnv.shared,
+    );
+
     List<TestBundle> createBundlesFromJson(
       List<Map<String, dynamic>> json,
     ) {
-      var testsConfig = TestsConfig.fromRawArgs(
-        rawArgs: [],
-        fxEnv: FakeFxEnv.shared,
-      );
       var cmd = FuchsiaTestCommand.fromConfig(
         testsConfig,
         testRunnerBuilder: (testsConfig) => TestRunner(),
@@ -411,123 +412,170 @@
 
     test('host tests only build the tests path', () async {
       expect(
-          TestBundle.calculateMinimalBuildTargets(createBundlesFromJson([
-            {
-              'environments': [],
-              'test': {
-                'command': ['some', 'command'],
-                'cpu': 'x64',
-                'label': '//scripts/lib:lib_tests(//build/toolchain:host_x64)',
-                'name': 'lib_tests',
-                'os': 'linux',
-                'path': 'host_x64/lib_tests',
-                'runtime_deps': 'host_x64/gen/scripts/lib/lib_tests.deps.json'
-              }
-            }
-          ])),
+          TestBundle.calculateMinimalBuildTargets(
+              testsConfig,
+              createBundlesFromJson([
+                {
+                  'environments': [],
+                  'test': {
+                    'command': ['some', 'command'],
+                    'cpu': 'x64',
+                    'label':
+                        '//scripts/lib:lib_tests(//build/toolchain:host_x64)',
+                    'name': 'lib_tests',
+                    'os': 'linux',
+                    'path': 'host_x64/lib_tests',
+                    'runtime_deps':
+                        'host_x64/gen/scripts/lib/lib_tests.deps.json'
+                  }
+                }
+              ])),
           equals(['host_x64/lib_tests']));
     });
 
-    test('component tests only build updates', () async {
+    test('component tests only build the component', () async {
       expect(
-          TestBundle.calculateMinimalBuildTargets(createBundlesFromJson([
-            {
-              'environments': [],
-              'test': {
-                'cpu': 'x64',
-                'label': '//scripts/lib:lib_tests(//build/toolchain:host_x64)',
-                'name': 'lib_tests',
-                'os': 'fuchsia',
-                'package_url':
-                    'fuchsia-pkg://fuchsia.com/pkg-name#meta/component-name.cmx',
-                'runtime_deps': 'host_x64/gen/scripts/lib/lib_tests.deps.json'
-              }
-            }
-          ])),
-          equals(['updates']));
+          TestBundle.calculateMinimalBuildTargets(
+              testsConfig,
+              createBundlesFromJson([
+                {
+                  'environments': [],
+                  'test': {
+                    'cpu': 'x64',
+                    'label':
+                        '//scripts/lib:lib_tests(//build/toolchain:host_x64)',
+                    'name': 'lib_tests',
+                    'os': 'fuchsia',
+                    'package_url':
+                        'fuchsia-pkg://fuchsia.com/pkg-name#meta/component-name.cmx',
+                    'runtime_deps':
+                        'host_x64/gen/scripts/lib/lib_tests.deps.json'
+                  }
+                }
+              ])),
+          equals(['scripts/lib:lib_tests']));
+    });
+
+    test('component tests only build the package', () async {
+      expect(
+          TestBundle.calculateMinimalBuildTargets(
+              testsConfig,
+              createBundlesFromJson([
+                {
+                  'environments': [],
+                  'test': {
+                    'cpu': 'x64',
+                    'label':
+                        '//scripts/lib:lib_tests(//build/toolchain:host_x64)',
+                    'package_label':
+                        '//scripts/lib:test_package(//build/toolchain:host_x64)',
+                    'name': 'lib_tests',
+                    'os': 'fuchsia',
+                    'package_url':
+                        'fuchsia-pkg://fuchsia.com/pkg-name#meta/component-name.cmx',
+                    'runtime_deps':
+                        'host_x64/gen/scripts/lib/lib_tests.deps.json'
+                  }
+                }
+              ])),
+          equals(['scripts/lib:test_package']));
     });
 
     test(
-        'mixed host and component tests build both updates and the host test path',
+        'mixed host and component tests build both the component and the host test path',
         () async {
       expect(
-          TestBundle.calculateMinimalBuildTargets(createBundlesFromJson([
-            {
-              'environments': [],
-              'test': {
-                'cpu': 'x64',
-                'label': '//scripts/lib:lib_tests(//build/toolchain:host_x64)',
-                'name': 'lib_tests',
-                'os': 'fuchsia',
-                'package_url':
-                    'fuchsia-pkg://fuchsia.com/pkg-name#meta/component-name.cmx',
-                'runtime_deps': 'host_x64/gen/scripts/lib/lib_tests.deps.json'
-              }
-            },
-            {
-              'environments': [],
-              'test': {
-                'command': ['some', 'command'],
-                'cpu': 'x64',
-                'label': '//scripts/lib:lib_tests(//build/toolchain:host_x64)',
-                'name': 'lib_tests',
-                'os': 'linux',
-                'path': 'host_x64/lib_tests',
-                'runtime_deps': 'host_x64/gen/scripts/lib/lib_tests.deps.json'
-              }
-            }
-          ])),
-          unorderedEquals(['updates', 'host_x64/lib_tests']));
+          TestBundle.calculateMinimalBuildTargets(
+              testsConfig,
+              createBundlesFromJson([
+                {
+                  'environments': [],
+                  'test': {
+                    'cpu': 'x64',
+                    'label':
+                        '//scripts/lib:lib_tests(//build/toolchain:host_x64)',
+                    'name': 'lib_tests',
+                    'os': 'fuchsia',
+                    'package_url':
+                        'fuchsia-pkg://fuchsia.com/pkg-name#meta/component-name.cmx',
+                    'runtime_deps':
+                        'host_x64/gen/scripts/lib/lib_tests.deps.json'
+                  }
+                },
+                {
+                  'environments': [],
+                  'test': {
+                    'command': ['some', 'command'],
+                    'cpu': 'x64',
+                    'label':
+                        '//scripts/lib:lib_tests(//build/toolchain:host_x64)',
+                    'name': 'lib_tests',
+                    'os': 'linux',
+                    'path': 'host_x64/lib_tests',
+                    'runtime_deps':
+                        'host_x64/gen/scripts/lib/lib_tests.deps.json'
+                  }
+                }
+              ])),
+          unorderedEquals(['scripts/lib:lib_tests', 'host_x64/lib_tests']));
     });
 
     test('an e2e test forces a full rebuild (default target)', () async {
       expect(
-          TestBundle.calculateMinimalBuildTargets(createBundlesFromJson([
-            // e2e test
-            {
-              'environments': [
+          TestBundle.calculateMinimalBuildTargets(
+              testsConfig,
+              createBundlesFromJson([
+                // e2e test
                 {
-                  'dimensions': {
-                    'device_type': 'asdf',
-                  },
+                  'environments': [
+                    {
+                      'dimensions': {
+                        'device_type': 'asdf',
+                      },
+                    },
+                  ],
+                  'test': {
+                    'cpu': 'x64',
+                    'label':
+                        '//scripts/e2e:e2e_tests(//build/toolchain:host_x64)',
+                    'name': 'e2e_tests',
+                    'os': 'linux',
+                    'path': 'path/to/e2e_tests',
+                    'runtime_deps':
+                        'host_x64/gen/scripts/e2e/e2e_tests.deps.json'
+                  }
                 },
-              ],
-              'test': {
-                'cpu': 'x64',
-                'label': '//scripts/e2e:e2e_tests(//build/toolchain:host_x64)',
-                'name': 'e2e_tests',
-                'os': 'linux',
-                'path': 'path/to/e2e_tests',
-                'runtime_deps': 'host_x64/gen/scripts/e2e/e2e_tests.deps.json'
-              }
-            },
-            // component test
-            {
-              'environments': [],
-              'test': {
-                'cpu': 'x64',
-                'label': '//scripts/lib:lib_tests(//build/toolchain:host_x64)',
-                'name': 'lib_tests',
-                'os': 'fuchsia',
-                'package_url':
-                    'fuchsia-pkg://fuchsia.com/pkg-name#meta/component-name.cmx',
-                'runtime_deps': 'host_x64/gen/scripts/lib/lib_tests.deps.json'
-              }
-            },
-            // host test
-            {
-              'environments': [],
-              'test': {
-                'command': ['some', 'command'],
-                'cpu': 'x64',
-                'label': '//scripts/lib:lib_tests(//build/toolchain:host_x64)',
-                'name': 'lib_tests',
-                'os': 'linux',
-                'path': 'host_x64/lib_tests',
-                'runtime_deps': 'host_x64/gen/scripts/lib/lib_tests.deps.json'
-              }
-            }
-          ])),
+                // component test
+                {
+                  'environments': [],
+                  'test': {
+                    'cpu': 'x64',
+                    'label':
+                        '//scripts/lib:lib_tests(//build/toolchain:host_x64)',
+                    'name': 'lib_tests',
+                    'os': 'fuchsia',
+                    'package_url':
+                        'fuchsia-pkg://fuchsia.com/pkg-name#meta/component-name.cmx',
+                    'runtime_deps':
+                        'host_x64/gen/scripts/lib/lib_tests.deps.json'
+                  }
+                },
+                // host test
+                {
+                  'environments': [],
+                  'test': {
+                    'command': ['some', 'command'],
+                    'cpu': 'x64',
+                    'label':
+                        '//scripts/lib:lib_tests(//build/toolchain:host_x64)',
+                    'name': 'lib_tests',
+                    'os': 'linux',
+                    'path': 'host_x64/lib_tests',
+                    'runtime_deps':
+                        'host_x64/gen/scripts/lib/lib_tests.deps.json'
+                  }
+                }
+              ])),
           // calculateMinimalBuildTargets returns null for a full build
           // (default target)
           <String>{});
diff --git a/scripts/fxutils/BUILD.gn b/scripts/fxutils/BUILD.gn
index 833fa29..6daea9f 100644
--- a/scripts/fxutils/BUILD.gn
+++ b/scripts/fxutils/BUILD.gn
@@ -23,6 +23,7 @@
     "src/exceptions.dart",
     "src/fx.dart",
     "src/fx_env.dart",
+    "src/gn_helper.dart",
     "src/list_iterator.dart",
     "src/mock_process.dart",
     "src/process_launcher.dart",
@@ -42,6 +43,7 @@
 dart_test("fxutils_tests") {
   sources = [
     "fxutils_test.dart",
+    "gn_helper_test.dart",
     "list_iterator_test.dart",
     "process_test.dart",
   ]
diff --git a/scripts/fxutils/lib/fxutils.dart b/scripts/fxutils/lib/fxutils.dart
index 314778d..08bd874 100644
--- a/scripts/fxutils/lib/fxutils.dart
+++ b/scripts/fxutils/lib/fxutils.dart
@@ -9,6 +9,7 @@
 export 'src/exceptions.dart';
 export 'src/fx.dart';
 export 'src/fx_env.dart';
+export 'src/gn_helper.dart';
 export 'src/list_iterator.dart';
 export 'src/mock_process.dart';
 export 'src/process_launcher.dart';
diff --git a/scripts/fxutils/lib/src/fx_env.dart b/scripts/fxutils/lib/src/fx_env.dart
index cf65b08..9e5627dc 100644
--- a/scripts/fxutils/lib/src/fx_env.dart
+++ b/scripts/fxutils/lib/src/fx_env.dart
@@ -19,6 +19,8 @@
   String? get zirconToolsDir;
   String get cwd;
 
+  bool isFeatureEnabled(String featureName) => true;
+
   /// Relative path to the current output directory from the root of the Fuchsia
   /// checkout.
   String? get relativeOutputDir => outputDir?.substring(fuchsiaDir!.length);
@@ -97,4 +99,11 @@
   /// Current working directory. Pulled from the OS.
   @override
   String get cwd => _envReader.getCwd();
+
+  /// Enable/Disable state of an optional feature.
+  /// See //tools/devshell/lib/fx-optional-features.sh for more information.
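+  /// For example, `isFeatureEnabled('incremental')` returns false only when
+  /// the FUCHSIA_DISABLED_incremental environment variable is set to '1'.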
+  @override
+  bool isFeatureEnabled(String featureName) {
+    return _envReader.getEnv('FUCHSIA_DISABLED_$featureName') != '1';
+  }
 }
diff --git a/scripts/fxutils/lib/src/gn_helper.dart b/scripts/fxutils/lib/src/gn_helper.dart
new file mode 100644
index 0000000..e3e8257
--- /dev/null
+++ b/scripts/fxutils/lib/src/gn_helper.dart
@@ -0,0 +1,11 @@
+// Copyright 2020 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// regexp that extracts a suitable 'fx build' target from a "label" field
+// from a test_spec entry in tests.json.
+// For example, it extracts "pa/th:label" from "//pa/th:label(//tool/chain:toolchain)"
+RegExp _testLabelRe = RegExp(r'\/\/(.*)\((\/\/.*)\)');
+String? getBuildTarget(String? testLabel) {
+  return _testLabelRe.firstMatch(testLabel!)?.group(1);
+}
diff --git a/scripts/fxutils/test/gn_helper_test.dart b/scripts/fxutils/test/gn_helper_test.dart
new file mode 100644
index 0000000..7c07628
--- /dev/null
+++ b/scripts/fxutils/test/gn_helper_test.dart
@@ -0,0 +1,25 @@
+// Copyright 2020 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import 'package:fxutils/fxutils.dart';
+import 'package:test/test.dart';
+
+void main() {
+  group('getBuildTarget', () {
+    test('should extract correct build target', () {
+      expect(
+          getBuildTarget(
+              '//src/dir:name-with-dash_and_underscore(//build/toolchain/fuchsia:x64)'),
+          'src/dir:name-with-dash_and_underscore');
+    });
+
+    test('should return null if cannot match', () {
+      expect(getBuildTarget('//src/dir:no_toolchain'), null);
+      expect(
+          getBuildTarget(
+              'src/dir:no_doubleslash(//build/toolchain/fuchsia:x64)'),
+          null);
+    });
+  });
+}
diff --git a/sdk/cts/build/cts_element.gni b/sdk/cts/build/cts_element.gni
index 6cf1f39..6504c58 100644
--- a/sdk/cts/build/cts_element.gni
+++ b/sdk/cts/build/cts_element.gni
@@ -3,7 +3,6 @@
 # found in the LICENSE file.
 
 import("//sdk/cts/build/allowed_cts_deps.gni")
-import("//sdk/cts/build/cts_vars.gni")
 
 # Defines a CTS element.
 #
diff --git a/sdk/cts/build/cts_executable.gni b/sdk/cts/build/cts_executable.gni
index 84b4f0d..68180fa 100644
--- a/sdk/cts/build/cts_executable.gni
+++ b/sdk/cts/build/cts_executable.gni
@@ -3,7 +3,7 @@
 # found in the LICENSE file.
 
 import("//sdk/cts/build/cts_element.gni")
-import("//sdk/cts/build/cts_vars.gni")
+import("//sdk/cts/build/write_cts_file.gni")
 
 # An executable that can be used in CTS.
 #
@@ -40,12 +40,8 @@
     }
   }
 
-  # Creates a file to enable other CTS targets to depend on this test package.
-  target_build_dir = get_label_info(":$target_name", "dir")
-  target_build_dir = string_replace(target_build_dir, "//", "/")
-  write_file(
-      "${root_build_dir}/cts/${target_build_dir}/${target_name}.${cts_extension}",
-      "true")
+  write_cts_file(target_name) {
+  }
 }
 
 set_defaults("cts_executable") {
diff --git a/sdk/cts/build/cts_fuchsia_component.gni b/sdk/cts/build/cts_fuchsia_component.gni
index aa22ce4..ca47980 100644
--- a/sdk/cts/build/cts_fuchsia_component.gni
+++ b/sdk/cts/build/cts_fuchsia_component.gni
@@ -3,7 +3,7 @@
 # found in the LICENSE file.
 
 import("//sdk/cts/build/cts_element.gni")
-import("//sdk/cts/build/cts_vars.gni")
+import("//sdk/cts/build/write_cts_file.gni")
 import("//src/sys/build/components.gni")
 
 # A Fuchsia component that can be used in CTS.
@@ -33,10 +33,6 @@
     }
   }
 
-  # Creates a file to enable other CTS targets to depend on this test package.
-  target_build_dir = get_label_info(":$target_name", "dir")
-  target_build_dir = string_replace(target_build_dir, "//", "/")
-  write_file(
-      "${root_build_dir}/cts/${target_build_dir}/${target_name}.${cts_extension}",
-      true)
+  write_cts_file(target_name) {
+  }
 }
diff --git a/sdk/cts/build/cts_fuchsia_package.gni b/sdk/cts/build/cts_fuchsia_package.gni
index 24eaf6f..b87fc57 100644
--- a/sdk/cts/build/cts_fuchsia_package.gni
+++ b/sdk/cts/build/cts_fuchsia_package.gni
@@ -3,7 +3,7 @@
 # found in the LICENSE file.
 
 import("//sdk/cts/build/cts_element.gni")
-import("//sdk/cts/build/cts_vars.gni")
+import("//sdk/cts/build/write_cts_file.gni")
 import("//src/sys/build/fuchsia_package.gni")
 
 # A Fuchsia package that can be used in CTS.
@@ -24,10 +24,6 @@
     }
   }
 
-  # Creates a file to enable other CTS targets to depend on this test package.
-  target_build_dir = get_label_info(":$target_name", "dir")
-  target_build_dir = string_replace(target_build_dir, "//", "/")
-  write_file(
-      "${root_build_dir}/cts/${target_build_dir}/${target_name}.${cts_extension}",
-      true)
+  write_cts_file(target_name) {
+  }
 }
diff --git a/sdk/cts/build/cts_fuchsia_test_package.gni b/sdk/cts/build/cts_fuchsia_test_package.gni
index 1973fe2..1f961b4 100644
--- a/sdk/cts/build/cts_fuchsia_test_package.gni
+++ b/sdk/cts/build/cts_fuchsia_test_package.gni
@@ -3,7 +3,7 @@
 # found in the LICENSE file.
 
 import("//sdk/cts/build/cts_element.gni")
-import("//sdk/cts/build/cts_vars.gni")
+import("//sdk/cts/build/write_cts_file.gni")
 import("//src/sys/build/components.gni")
 
 # Defines a Compatibility Test Suite package.
@@ -44,10 +44,6 @@
     }
   }
 
-  # Creates a file to enable other CTS targets to depend on this test package.
-  target_build_dir = get_label_info(":$target_name", "dir")
-  target_build_dir = string_replace(target_build_dir, "//", "/")
-  write_file(
-      "${root_build_dir}/cts/${target_build_dir}/${target_name}.${cts_extension}",
-      true)
+  write_cts_file(target_name) {
+  }
 }
diff --git a/sdk/cts/build/cts_fuchsia_unittest_component.gni b/sdk/cts/build/cts_fuchsia_unittest_component.gni
index 3a45ec2..b30b613 100644
--- a/sdk/cts/build/cts_fuchsia_unittest_component.gni
+++ b/sdk/cts/build/cts_fuchsia_unittest_component.gni
@@ -2,7 +2,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 import("//sdk/cts/build/cts_element.gni")
-import("//sdk/cts/build/cts_vars.gni")
+import("//sdk/cts/build/write_cts_file.gni")
 import("//src/sys/build/components.gni")
 
 # Defines a Compatibility Test Suite unittest component.
@@ -33,10 +33,6 @@
     }
   }
 
-  # Creates a file to enable other CTS targets to depend on this test package.
-  target_build_dir = get_label_info(":$target_name", "dir")
-  target_build_dir = string_replace(target_build_dir, "//", "/")
-  write_file(
-      "${root_build_dir}/cts/${target_build_dir}/${target_name}.${cts_extension}",
-      true)
+  write_cts_file(target_name) {
+  }
 }
diff --git a/sdk/cts/build/cts_host_test_data.gni b/sdk/cts/build/cts_host_test_data.gni
index 589c65a..4439c6f 100644
--- a/sdk/cts/build/cts_host_test_data.gni
+++ b/sdk/cts/build/cts_host_test_data.gni
@@ -1,11 +1,10 @@
 # Copyright 2020 The Fuchsia Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-
 import("//build/host.gni")
 import("//build/testing/host_test_data.gni")
 import("//sdk/cts/build/cts_element.gni")
-import("//sdk/cts/build/cts_vars.gni")
+import("//sdk/cts/build/write_cts_file.gni")
 
 template("cts_host_test_data") {
   if (defined(invoker.deps) && invoker.deps != []) {
@@ -21,9 +20,6 @@
     }
   }
 
-  target_build_dir = get_label_info(":$target_name", "dir")
-  target_build_dir = string_replace(target_build_dir, "//", "/")
-  write_file(
-      "${root_build_dir}/cts/${target_build_dir}/${target_name}.${cts_extension}",
-      true)
+  write_cts_file(target_name) {
+  }
 }
diff --git a/sdk/cts/build/cts_resource.gni b/sdk/cts/build/cts_resource.gni
index de4ed52..36c6363 100644
--- a/sdk/cts/build/cts_resource.gni
+++ b/sdk/cts/build/cts_resource.gni
@@ -4,7 +4,7 @@
 
 import("//build/dist/resource.gni")
 import("//sdk/cts/build/cts_element.gni")
-import("//sdk/cts/build/cts_vars.gni")
+import("//sdk/cts/build/write_cts_file.gni")
 
 # A resource that can be used in CTS.
 #
@@ -50,10 +50,6 @@
     }
   }
 
-  # Creates a file to enable other CTS targets to depend on this test package.
-  target_build_dir = get_label_info(":$target_name", "dir")
-  target_build_dir = string_replace(target_build_dir, "//", "/")
-  write_file(
-      "${root_build_dir}/cts/${target_build_dir}/${target_name}.${cts_extension}",
-      "true")
+  write_cts_file(target_name) {
+  }
 }
diff --git a/sdk/cts/build/cts_source_set.gni b/sdk/cts/build/cts_source_set.gni
index bad79d6..125e4c65 100644
--- a/sdk/cts/build/cts_source_set.gni
+++ b/sdk/cts/build/cts_source_set.gni
@@ -3,7 +3,7 @@
 # found in the LICENSE file.
 
 import("//sdk/cts/build/cts_element.gni")
-import("//sdk/cts/build/cts_vars.gni")
+import("//sdk/cts/build/write_cts_file.gni")
 
 # A source_set that can be used in CTS.
 #
@@ -34,12 +34,8 @@
     }
   }
 
-  # Creates a file to enable other CTS targets to depend on this test package.
-  target_build_dir = get_label_info(":$target_name", "dir")
-  target_build_dir = string_replace(target_build_dir, "//", "/")
-  write_file(
-      "${root_build_dir}/cts/${target_build_dir}/${target_name}.${cts_extension}",
-      true)
+  write_cts_file(target_name) {
+  }
 }
 
 set_defaults("cts_source_set") {
diff --git a/sdk/cts/build/cts_vars.gni b/sdk/cts/build/cts_vars.gni
deleted file mode 100644
index 84675c1..0000000
--- a/sdk/cts/build/cts_vars.gni
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright 2020 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-cts_extension = "this_is_cts"
diff --git a/sdk/cts/build/dart/cts_dart_library.gni b/sdk/cts/build/dart/cts_dart_library.gni
index 60fc279..36f2c66 100644
--- a/sdk/cts/build/dart/cts_dart_library.gni
+++ b/sdk/cts/build/dart/cts_dart_library.gni
@@ -1,10 +1,9 @@
 # Copyright 2020 The Fuchsia Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-
 import("//build/dart/dart_library.gni")
 import("//sdk/cts/build/cts_element.gni")
-import("//sdk/cts/build/cts_vars.gni")
+import("//sdk/cts/build/write_cts_file.gni")
 
 # A Dart library that can be used in CTS.
 #
@@ -37,10 +36,6 @@
     }
   }
 
-  # Creates a file to enable other CTS targets to depend on this target.
-  target_build_dir = get_label_info(":$target_name", "dir")
-  target_build_dir = string_replace(target_build_dir, "//", "/")
-  write_file(
-      "${root_build_dir}/cts/${target_build_dir}/${target_name}.${cts_extension}",
-      true)
+  write_cts_file(target_name) {
+  }
 }
diff --git a/sdk/cts/build/dart/cts_dart_test.gni b/sdk/cts/build/dart/cts_dart_test.gni
index 5ee6772..f7e722a 100644
--- a/sdk/cts/build/dart/cts_dart_test.gni
+++ b/sdk/cts/build/dart/cts_dart_test.gni
@@ -4,7 +4,7 @@
 
 import("//build/dart/test.gni")
 import("//sdk/cts/build/cts_element.gni")
-import("//sdk/cts/build/cts_vars.gni")
+import("//sdk/cts/build/write_cts_file.gni")
 
 template("cts_dart_test") {
   invoker_deps = []
@@ -34,10 +34,6 @@
     }
   }
 
-  # Creates a file to enable other CTS targets to depend on this target.
-  target_build_dir = get_label_info(":$target_name", "dir")
-  target_build_dir = string_replace(target_build_dir, "//", "/")
-  write_file(
-      "${root_build_dir}/cts/${target_build_dir}/${target_name}.${cts_extension}",
-      true)
+  write_cts_file(target_name) {
+  }
 }
diff --git a/sdk/cts/build/fidl/cts_fidl.gni b/sdk/cts/build/fidl/cts_fidl.gni
index 14f30ed80..1497a9d 100644
--- a/sdk/cts/build/fidl/cts_fidl.gni
+++ b/sdk/cts/build/fidl/cts_fidl.gni
@@ -4,7 +4,7 @@
 
 import("//build/fidl/fidl.gni")
 import("//sdk/cts/build/cts_element.gni")
-import("//sdk/cts/build/cts_vars.gni")
+import("//sdk/cts/build/write_cts_file.gni")
 
 # Declares a FIDL Library that can be used in CTS.
 #
@@ -27,10 +27,6 @@
     }
   }
 
-  # Creates a file to enable other CTS targets to depend on this test package.
-  target_build_dir = get_label_info(":$target_name", "dir")
-  target_build_dir = string_replace(target_build_dir, "//", "/")
-  write_file(
-      "${root_build_dir}/cts/${target_build_dir}/${target_name}.${cts_extension}",
-      "true")
+  write_cts_file(target_name) {
+  }
 }
diff --git a/sdk/cts/build/write_cts_file.gni b/sdk/cts/build/write_cts_file.gni
new file mode 100644
index 0000000..518bf4a
--- /dev/null
+++ b/sdk/cts/build/write_cts_file.gni
@@ -0,0 +1,17 @@
+# Copyright 2020 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# A helper rule to write a file indicating that a target is a CTS target.
+#
+# This rule should not be called directly, but through the other cts_* build rules.
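+#
+# Example (as invoked from a cts_* template):
+#
+#   write_cts_file(target_name) {
+#   }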
+template("write_cts_file") {
+  not_needed(invoker, "*")
+  cts_extension = "this_is_cts"
+
+  target_build_dir = get_label_info(":$target_name", "dir")
+  target_build_dir = string_replace(target_build_dir, "//", "/")
+  write_file(
+      "${root_build_dir}/cts/${target_build_dir}/${target_name}.${cts_extension}",
+      true)
+}
diff --git a/sdk/fidl/fuchsia.time/BUILD.gn b/sdk/fidl/fuchsia.time/BUILD.gn
index 414b71e..eb0b335 100644
--- a/sdk/fidl/fuchsia.time/BUILD.gn
+++ b/sdk/fidl/fuchsia.time/BUILD.gn
@@ -6,9 +6,6 @@
 
 fidl("fuchsia.time") {
   sdk_category = "internal"
-  sources = [
-    "maintenance.fidl",
-    "utc.fidl",
-  ]
+  sources = [ "maintenance.fidl" ]
   public_deps = [ "//zircon/vdso/zx" ]
 }
diff --git a/sdk/fidl/fuchsia.time/utc.fidl b/sdk/fidl/fuchsia.time/utc.fidl
deleted file mode 100644
index e376906..0000000
--- a/sdk/fidl/fuchsia.time/utc.fidl
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2019 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-library fuchsia.time;
-using zx;
-
-/// Metadata about a device's approximation of UTC time, commonly referred to as "system time".
-[Discoverable, Deprecated = "Wait for the ZX_CLOCK_STARTED signal on the UTC clock instead"]
-protocol Utc {
-    /// Notifies clients of updates to the UTC timeline. The first call on a channel returns
-    /// immediately, and subsequent calls on the same channel will return when the state
-    /// has changed.
-    WatchState() -> (UtcState state);
-};
-
-/// Describes the state of the clock.
-table UtcState {
-    /// The monotonic time at which this `UtcState` was observed.
-    1: zx.time timestamp;
-    /// The source of our current UTC approximation.
-    2: UtcSource source;
-};
-
-/// Describes the source from which the current UTC approximation was retrieved.
-enum UtcSource {
-    /// The clock has been initialized to a known-prior reference time but may be highly inaccurate.
-    BACKSTOP = 2;
-    /// The clock has been initialized from a time source that is believed to be to accurate but
-    /// that could not be verified. An example might be a battery-backed real time clock.
-    UNVERIFIED = 4;
-    /// The clock has been initialized from a suitably accurate and verified external time source.
-    /// For many devices the most common external time source is a network time server using some
-    /// protocol with cryptographic authentication such as Roughtime or HTTPSdate.
-    EXTERNAL = 3;
-};
diff --git a/src/connectivity/bluetooth/core/bt-init/src/config.rs b/src/connectivity/bluetooth/core/bt-init/src/config.rs
index 97f0dd5..5ca39c9f 100644
--- a/src/connectivity/bluetooth/core/bt-init/src/config.rs
+++ b/src/connectivity/bluetooth/core/bt-init/src/config.rs
@@ -3,9 +3,9 @@
 // found in the LICENSE file.
 
 use {
-    anyhow::Error,
+    anyhow::{Context, Error},
     fidl_fuchsia_bluetooth_control::{ControlMarker, InputCapabilityType, OutputCapabilityType},
-    fuchsia_component::client::connect_to_service,
+    fuchsia_component::client::App,
     serde::{Deserialize, Serialize},
     serde_json,
     std::{fs::OpenOptions, io::Read},
@@ -53,15 +53,16 @@
         let mut contents = String::new();
         config.read_to_string(&mut contents).expect("The bt-init config file is corrupted");
 
-        Ok(serde_json::from_str(contents.as_str())?)
+        Ok(serde_json::from_str(contents.as_str()).context("Failed to parse config file")?)
     }
 
     pub fn autostart_snoop(&self) -> bool {
         self.autostart_snoop
     }
 
-    pub async fn set_capabilities(&self) -> Result<(), Error> {
-        let bt_svc = connect_to_service::<ControlMarker>()
+    pub async fn set_capabilities(&self, bt_gap: &App) -> Result<(), Error> {
+        let bt_svc = bt_gap
+            .connect_to_service::<ControlMarker>()
             .expect("failed to connect to bluetooth control interface");
         bt_svc.set_io_capabilities(self.io.input, self.io.output).map_err(Into::into)
     }
diff --git a/src/connectivity/bluetooth/core/bt-init/src/main.rs b/src/connectivity/bluetooth/core/bt-init/src/main.rs
index 6ffc814..3189654 100644
--- a/src/connectivity/bluetooth/core/bt-init/src/main.rs
+++ b/src/connectivity/bluetooth/core/bt-init/src/main.rs
@@ -15,10 +15,7 @@
     },
     fuchsia_async as fasync,
     fuchsia_component::{client, fuchsia_single_component_package_url, server},
-    futures::{
-        future::{self, try_join},
-        FutureExt, StreamExt,
-    },
+    futures::{future, StreamExt},
     log::{info, warn},
 };
 
@@ -29,49 +26,66 @@
     info!("Starting bt-init...");
 
     let mut executor = fasync::Executor::new().context("Error creating executor")?;
-    let cfg = config::Config::load()?;
+    let cfg = config::Config::load().context("Error loading config")?;
 
     // Start bt-snoop service before anything else and hold onto the connection until bt-init exits.
     let snoop_connection;
     if cfg.autostart_snoop() {
+        info!("Starting snoop service...");
         snoop_connection = client::connect_to_service::<SnoopMarker>();
         if let Err(e) = snoop_connection {
             warn!("Failed to start snoop service: {}", e);
+        } else {
+            info!("Snoop service started successfully");
         }
     }
 
-    let launcher = client::launcher().expect("Failed to launch bt-gap (bluetooth) service");
+    info!("Launching BT-GAP service...");
+    let launcher = client::launcher()
+        .expect("Failed to launch bt-gap (bluetooth) service; could not access launcher service");
     let bt_gap = client::launch(
         &launcher,
         fuchsia_single_component_package_url!("bt-gap").to_string(),
         None,
-    )?;
+    )
+    .context("Error launching BT-GAP component")?;
+    info!("BT-GAP launched successfully");
 
-    let mut fs = server::ServiceFs::new();
-    fs.dir("svc")
-        .add_service_at(AccessMarker::NAME, |chan| Some((AccessMarker::NAME, chan)))
-        .add_service_at(BootstrapMarker::NAME, |chan| Some((BootstrapMarker::NAME, chan)))
-        .add_service_at(ConfigurationMarker::NAME, |chan| Some((ConfigurationMarker::NAME, chan)))
-        .add_service_at(ControlMarker::NAME, |chan| Some((ControlMarker::NAME, chan)))
-        .add_service_at(CentralMarker::NAME, |chan| Some((CentralMarker::NAME, chan)))
-        .add_service_at(HostWatcherMarker::NAME, |chan| Some((HostWatcherMarker::NAME, chan)))
-        .add_service_at(PeripheralMarker::NAME, |chan| Some((PeripheralMarker::NAME, chan)))
-        .add_service_at(ProfileMarker::NAME, |chan| Some((ProfileMarker::NAME, chan)))
-        .add_service_at(Server_Marker::NAME, |chan| Some((Server_Marker::NAME, chan)));
-    fs.take_and_serve_directory_handle()?;
-    let server = fs
-        .for_each(move |(name, chan)| {
+    let run_bluetooth = async move {
+        info!("Configuring BT-GAP");
+        // First, configure bt-gap
+        cfg.set_capabilities(&bt_gap).await.context("Error configuring BT-GAP")?;
+        info!("BT-GAP configuration sent successfully");
+
+        // Then, we can begin serving its services
+        let mut fs = server::ServiceFs::new();
+        fs.dir("svc")
+            .add_service_at(AccessMarker::NAME, |chan| Some((AccessMarker::NAME, chan)))
+            .add_service_at(BootstrapMarker::NAME, |chan| Some((BootstrapMarker::NAME, chan)))
+            .add_service_at(ConfigurationMarker::NAME, |chan| {
+                Some((ConfigurationMarker::NAME, chan))
+            })
+            .add_service_at(ControlMarker::NAME, |chan| Some((ControlMarker::NAME, chan)))
+            .add_service_at(CentralMarker::NAME, |chan| Some((CentralMarker::NAME, chan)))
+            .add_service_at(HostWatcherMarker::NAME, |chan| Some((HostWatcherMarker::NAME, chan)))
+            .add_service_at(PeripheralMarker::NAME, |chan| Some((PeripheralMarker::NAME, chan)))
+            .add_service_at(ProfileMarker::NAME, |chan| Some((ProfileMarker::NAME, chan)))
+            .add_service_at(Server_Marker::NAME, |chan| Some((Server_Marker::NAME, chan)));
+        fs.take_and_serve_directory_handle()?;
+
+        info!("Initialization complete, begin serving FIDL protocols");
+        fs.for_each(move |(name, chan)| {
             info!("Passing {} Handle to bt-gap", name);
-            let _ = bt_gap.pass_to_named_service(name, chan);
+            if let Err(e) = bt_gap.pass_to_named_service(name, chan) {
+                warn!("Error passing {} handle to bt-gap: {:?}", name, e);
+            }
             future::ready(())
         })
-        .map(Ok);
-
-    let io_config_fut = cfg.set_capabilities();
+        .await;
+        Ok::<(), Error>(())
+    };
 
     executor
-        .run_singlethreaded(try_join(server, io_config_fut))
-        .context("bt-init failed to execute future")
-        .map_err(|e| e.into())
-        .map(|_| ())
+        .run_singlethreaded(run_bluetooth)
+        .context("bt-init encountered an error during execution")
 }
diff --git a/src/connectivity/bluetooth/lib/BUILD.gn b/src/connectivity/bluetooth/lib/BUILD.gn
index 2073990..d5cf3ad 100644
--- a/src/connectivity/bluetooth/lib/BUILD.gn
+++ b/src/connectivity/bluetooth/lib/BUILD.gn
@@ -19,7 +19,7 @@
     "fidl",
     "fidl:tests",
     "fuchsia-audio-codec",
-    "fuchsia-audio-codec:fuchsia-audio-codec-tests",
+    "fuchsia-audio-codec:tests",
     "fuchsia-audio-device-output",
     "fuchsia-audio-device-output:tests",
     "fuchsia-bluetooth",
diff --git a/src/connectivity/bluetooth/lib/fuchsia-audio-codec/BUILD.gn b/src/connectivity/bluetooth/lib/fuchsia-audio-codec/BUILD.gn
index 6f32c41..a247792 100644
--- a/src/connectivity/bluetooth/lib/fuchsia-audio-codec/BUILD.gn
+++ b/src/connectivity/bluetooth/lib/fuchsia-audio-codec/BUILD.gn
@@ -2,10 +2,8 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import("//build/package.gni")
 import("//build/rust/rustc_library.gni")
-import("//build/test/test_package.gni")
-import("//build/testing/environments.gni")
+import("//src/sys/build/components.gni")
 
 rustc_library("fuchsia-audio-codec") {
   name = "fuchsia_audio_codec"
@@ -47,21 +45,21 @@
   ]
 }
 
-test_package("fuchsia-audio-codec-tests") {
-  deps = [ ":fuchsia-audio-codec_test" ]
+resource("test-data") {
+  sources = [ rebase_path(
+          "//src/media/stream_processors/test/audio_decoder_test/test_data/s16le44100mono.sbc") ]
+  outputs = [ "data/{{source_file_part}}" ]
+}
 
-  tests = [
-    {
-      name = "fuchsia_audio_codec_lib_test"
-      environments = basic_envs
-    },
+fuchsia_unittest_package("fuchsia-audio-codec-tests") {
+  manifest = "meta/fuchsia_audio_codec_lib_test.cmx"
+  deps = [
+    ":fuchsia-audio-codec_test",
+    ":test-data",
   ]
+}
 
-  resources = [
-    {
-      path = rebase_path(
-              "//src/media/stream_processors/test/audio_decoder_test/test_data/s16le44100mono.sbc")
-      dest = "s16le44100mono.sbc"
-    },
-  ]
+group("tests") {
+  testonly = true
+  deps = [ ":fuchsia-audio-codec-tests" ]
 }
diff --git a/src/connectivity/bluetooth/lib/fuchsia-audio-codec/meta/fuchsia_audio_codec_lib_test.cmx b/src/connectivity/bluetooth/lib/fuchsia-audio-codec/meta/fuchsia_audio_codec_lib_test.cmx
index c15b82b..3b25b5a 100644
--- a/src/connectivity/bluetooth/lib/fuchsia-audio-codec/meta/fuchsia_audio_codec_lib_test.cmx
+++ b/src/connectivity/bluetooth/lib/fuchsia-audio-codec/meta/fuchsia_audio_codec_lib_test.cmx
@@ -11,7 +11,7 @@
         "sdk/lib/diagnostics/syslog/client.shard.cmx"
     ],
     "program": {
-        "binary": "test/fuchsia_audio_codec_lib_test"
+        "binary": "bin/fuchsia_audio_codec_lib_test"
     },
     "sandbox": {
         "services": [
diff --git a/src/connectivity/network/netstack/fuchsia_posix_socket.go b/src/connectivity/network/netstack/fuchsia_posix_socket.go
index 398b5c2..599e3c05 100644
--- a/src/connectivity/network/netstack/fuchsia_posix_socket.go
+++ b/src/connectivity/network/netstack/fuchsia_posix_socket.go
@@ -565,6 +565,15 @@
 			panic(err)
 		}
 
+		// TODO(https://github.com/google/gvisor/issues/5155): Remove this when
+		// link address resolution failures deliver EventHUp.
+		switch tcp.EndpointState(eps.ep.State()) {
+		case tcp.StateConnecting, tcp.StateSynSent, tcp.StateError:
+			if cb := eps.onHUp.Callback; cb != nil {
+				cb.Callback(nil, 0)
+			}
+		}
+
 		eps.ep.Close()
 
 		syslog.VLogTf(syslog.DebugVerbosity, "close", "%p", eps)
@@ -1318,7 +1327,7 @@
 	// Key value 0 would indicate that the endpoint was never
 	// added to the endpoints map.
 	if key == 0 {
-		syslog.Errorf("endpoint map delete error, endpoint with key 0 is not be removed")
+		syslog.Errorf("endpoint map delete error, endpoint with key 0 is not being removed")
 		return
 	}
 	if _, loaded := ns.endpoints.LoadAndDelete(key); !loaded {
diff --git a/src/connectivity/network/netstack/netstack_test.go b/src/connectivity/network/netstack/netstack_test.go
index 63151f5..5327e35 100644
--- a/src/connectivity/network/netstack/netstack_test.go
+++ b/src/connectivity/network/netstack/netstack_test.go
@@ -14,8 +14,8 @@
 	"math"
 	"net"
 	"os"
-	"runtime"
 	"sort"
+	"sync/atomic"
 	"syscall/zx"
 	"testing"
 	"time"
@@ -40,6 +40,7 @@
 	"github.com/google/go-cmp/cmp"
 	"gvisor.dev/gvisor/pkg/tcpip"
 	"gvisor.dev/gvisor/pkg/tcpip/header"
+	"gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
 	"gvisor.dev/gvisor/pkg/tcpip/network/arp"
 	"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
 	"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
@@ -76,6 +77,9 @@
 			panic(fmt.Sprintf("syslog.NewLogger(%#v) = %s", options, err))
 		}
 		syslog.SetDefaultLogger(l)
+
+		// As of this writing we set this value to 0 in netstack/main.go.
+		atomic.StoreUint32(&sniffer.LogPackets, 1)
 	}
 
 	os.Exit(m.Run())
@@ -238,18 +242,18 @@
 
 func TestEndpoint_Close(t *testing.T) {
 	ns := newNetstack(t)
-	wq := &waiter.Queue{}
+	var wq waiter.Queue
 	// Avoid polluting everything with err of type *tcpip.Error.
 	ep := func() tcpip.Endpoint {
-		ep, err := ns.stack.NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, wq)
+		ep, err := ns.stack.NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, &wq)
 		if err != nil {
-			t.Fatalf("NewEndpoint() = %s", err)
+			t.Fatalf("NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, _) = %s", err)
 		}
 		return ep
 	}()
 	defer ep.Close()
 
-	eps, err := newEndpointWithSocket(ep, wq, tcp.ProtocolNumber, ipv6.ProtocolNumber, ns)
+	eps, err := newEndpointWithSocket(ep, &wq, tcp.ProtocolNumber, ipv6.ProtocolNumber, ns)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -274,14 +278,13 @@
 		}
 	}
 
-	key := eps.endpoint.key
-	if _, ok := eps.ns.endpoints.Load(key); !ok {
+	if _, ok := eps.ns.endpoints.Load(eps.endpoint.key); !ok {
 		var keys []uint64
 		eps.ns.endpoints.Range(func(key uint64, _ tcpip.Endpoint) bool {
 			keys = append(keys, key)
 			return true
 		})
-		t.Errorf("got endpoints map = %v at creation, want %d", keys, key)
+		t.Errorf("got endpoints map = %d at creation, want %d", keys, eps.endpoint.key)
 	}
 
 	if t.Failed() {
@@ -345,13 +348,13 @@
 		}
 	}
 
-	if _, ok := eps.ns.endpoints.Load(key); !ok {
+	if _, ok := eps.ns.endpoints.Load(eps.endpoint.key); !ok {
 		var keys []uint64
 		eps.ns.endpoints.Range(func(key uint64, _ tcpip.Endpoint) bool {
 			keys = append(keys, key)
 			return true
 		})
-		t.Errorf("got endpoints map prematurely = %v, want %d", keys, key)
+		t.Errorf("got endpoints map prematurely = %d, want %d", keys, eps.endpoint.key)
 	}
 
 	if t.Failed() {
@@ -382,9 +385,9 @@
 				keys = append(keys, key)
 				return true
 			})
-			t.Errorf("got endpoints map = %v after closure, want *not* %d", keys, key)
+			t.Errorf("got endpoints map = %d after closure, want *not* %d", keys, eps.endpoint.key)
 		default:
-			if _, ok := eps.ns.endpoints.Load(key); ok {
+			if _, ok := eps.ns.endpoints.Load(eps.endpoint.key); ok {
 				continue
 			}
 		}
@@ -405,16 +408,11 @@
 		t.Fatalf("ns.addLoopback() = %s", err)
 	}
 
-	createEP := func() (tcpip.Endpoint, *waiter.Queue) {
-		var wq waiter.Queue
-		ep, err := ns.stack.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
-		if err != nil {
-			t.Fatalf("NewEndpoint() = %s", err)
-		}
-		return ep, &wq
+	var listenerWQ waiter.Queue
+	listener, err := ns.stack.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, &listenerWQ)
+	if err != nil {
+		t.Fatalf("NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, _) = %s", err)
 	}
-
-	listener, listenerWQ := createEP()
 	if err := listener.Bind(tcpip.FullAddress{}); err != nil {
 		t.Fatalf("Bind({}) = %s", err)
 	}
@@ -422,7 +420,10 @@
 		t.Fatalf("Listen(1) = %s", err)
 	}
 
-	client, _ := createEP()
+	client, err := ns.stack.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, new(waiter.Queue))
+	if err != nil {
+		t.Fatalf("NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, _) = %s", err)
+	}
 
 	// Connect and wait for the incoming connection.
 	func() {
@@ -464,35 +465,164 @@
 		return server, wq
 	}()
 
-	eps, err := newEndpointWithSocket(accepted, acceptedWQ, tcp.ProtocolNumber, ipv4.ProtocolNumber, ns)
+	{
+		eps, err := newEndpointWithSocket(accepted, acceptedWQ, tcp.ProtocolNumber, ipv4.ProtocolNumber, ns)
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer eps.close()
+
+		channels := []struct {
+			ch   <-chan struct{}
+			name string
+		}{
+			{ch: eps.closing, name: "closing"},
+			{ch: eps.loopReadDone, name: "loopReadDone"},
+			{ch: eps.loopWriteDone, name: "loopWriteDone"},
+			{ch: eps.loopPollDone, name: "loopPollDone"},
+		}
+
+		// Give a generous timeout for the closed channel to be detected.
+		timeout := make(chan struct{})
+		time.AfterFunc(5*time.Second, func() { close(timeout) })
+		for _, ch := range channels {
+			select {
+			case <-ch.ch:
+			case <-timeout:
+				t.Errorf("%s not cleaned up", ch.name)
+			}
+		}
+
+		if _, ok := ns.endpoints.Load(eps.endpoint.key); ok {
+			t.Fatalf("got endpoints.Load(%d) = (_, true)", eps.endpoint.key)
+		}
+	}
+}
+
+func createEP(t *testing.T, ns *Netstack, wq *waiter.Queue) *endpointWithSocket {
+	// Avoid polluting the scope with err of type *tcpip.Error.
+	ep := func() tcpip.Endpoint {
+		ep, err := ns.stack.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
+		if err != nil {
+			t.Fatalf("NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, _) = %s", err)
+		}
+		return ep
+	}()
+	t.Cleanup(ep.Close)
+	eps, err := newEndpointWithSocket(ep, wq, tcp.ProtocolNumber, ipv4.ProtocolNumber, ns)
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer eps.close()
+	t.Cleanup(eps.close)
+	return eps
+}
 
-	channels := []struct {
-		ch   <-chan struct{}
-		name string
-	}{
-		{ch: eps.closing, name: "closing"},
-		{ch: eps.loopReadDone, name: "loopReadDone"},
-		{ch: eps.loopWriteDone, name: "loopWriteDone"},
-		{ch: eps.loopPollDone, name: "loopPollDone"},
+func TestTCPEndpointMapClose(t *testing.T) {
+	ns := newNetstack(t)
+	eps := createEP(t, ns, new(waiter.Queue))
+
+	// Closing the endpoint should remove it from the endpoints map.
+	if _, ok := ns.endpoints.Load(eps.endpoint.key); !ok {
+		t.Fatalf("got endpoints.Load(%d) = (_, false)", eps.endpoint.key)
+	}
+	eps.close()
+	if _, ok := ns.endpoints.Load(eps.endpoint.key); ok {
+		t.Fatalf("got endpoints.Load(%d) = (_, true)", eps.endpoint.key)
+	}
+}
+
+func TestTCPEndpointMapConnect(t *testing.T) {
+	ns := newNetstack(t)
+
+	var linkEP tcpipstack.LinkEndpoint = &noopEndpoint{
+		capabilities: tcpipstack.CapabilityResolutionRequired,
+	}
+	if testing.Verbose() {
+		linkEP = sniffer.New(linkEP)
+	}
+	ifs, err := ns.addEndpoint(
+		func(tcpip.NICID) string { return t.Name() },
+		linkEP,
+		nil,
+		nil,
+		false, /* doFilter */
+		0,     /* metric */
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := ns.stack.EnableNIC(ifs.nicid); err != nil {
+		t.Fatal(err)
 	}
 
-	// Give a generous timeout for the closed channel to be detected.
-	timeout := make(chan struct{})
-	time.AfterFunc(5*time.Second, func() { close(timeout) })
-	for _, ch := range channels {
-		select {
-		case <-ch.ch:
-		case <-timeout:
-			t.Errorf("%s not cleaned up", ch.name)
-		}
+	address := tcpip.Address([]byte{1, 2, 3, 4})
+	destination := tcpip.FullAddress{
+		Addr: address,
+		Port: 1,
+	}
+	source := tcpip.Address([]byte{5, 6, 7, 8})
+	if err := ns.stack.AddAddress(ifs.nicid, ipv4.ProtocolNumber, source); err != nil {
+		t.Fatalf("AddAddress(%d, %d, %s) = %s", ifs.nicid, ipv4.ProtocolNumber, source, err)
 	}
 
-	if _, ok := ns.endpoints.Load(eps.key); ok {
-		t.Fatalf("got endpoints.Load(%d) = (_, true)", eps.key)
+	ns.stack.SetRouteTable([]tcpip.Route{
+		{
+			Destination: address.WithPrefix().Subnet(),
+			NIC:         ifs.nicid,
+		},
+	})
+
+	// TODO(https://github.com/google/gvisor/issues/5155): Remove the poll=false
+	// case when link address resolution failures deliver EventHUp.
+	for _, pollForError := range []bool{false, true} {
+		t.Run(fmt.Sprintf("pollForError=%t", pollForError), func(t *testing.T) {
+			var wq waiter.Queue
+			eps := createEP(t, ns, &wq)
+
+			events := make(chan waiter.EventMask)
+			if pollForError {
+				// TODO(https://github.com/google/gvisor/issues/5155): Remove the else
+				// branch when link address resolution failures deliver EventHUp.
+				if false {
+					waitEntry := waiter.Entry{Callback: callback(func(_ *waiter.Entry, m waiter.EventMask) {
+						events <- m
+					})}
+					wq.EventRegister(&waitEntry, math.MaxUint64)
+					defer wq.EventUnregister(&waitEntry)
+				} else {
+					go func() {
+						ticker := time.NewTicker(10 * time.Millisecond)
+						defer ticker.Stop()
+						// Wait until link address resolution fails.
+						for {
+							if tcp.EndpointState(eps.ep.State()) == tcp.StateError {
+								break
+							}
+							<-ticker.C
+						}
+						close(events)
+					}()
+				}
+			} else {
+				close(events)
+			}
+
+			if want, got := tcpip.ErrConnectStarted, eps.ep.Connect(destination); got != want {
+				t.Fatalf("got Connect(%#v) = %s, want %s", destination, got, want)
+			}
+			// TODO(https://github.com/google/gvisor/issues/5155): Assert on the
+			// signals here when link address resolution failures deliver EventHUp.
+			<-events
+
+			// Closing the endpoint should remove it from the endpoints map.
+			if _, ok := ns.endpoints.Load(eps.endpoint.key); !ok {
+				t.Fatalf("got endpoints.Load(%d) = (_, false)", eps.endpoint.key)
+			}
+			eps.close()
+			if _, ok := ns.endpoints.Load(eps.endpoint.key); ok {
+				t.Fatalf("got endpoints.Load(%d) = (_, true)", eps.endpoint.key)
+			}
+		})
 	}
 }
 
@@ -504,25 +634,7 @@
 	if err := ns.addLoopback(); err != nil {
 		t.Fatalf("ns.addLoopback() = %s", err)
 	}
-	createEP := func() *endpointWithSocket {
-		wq := &waiter.Queue{}
-		// Avoid polluting everything with err of type *tcpip.Error.
-		ep := func() tcpip.Endpoint {
-			ep, err := ns.stack.NewEndpoint(tcp.ProtocolNumber, ipv4.ProtocolNumber, wq)
-			if err != nil {
-				t.Fatalf("NewEndpoint() = %s", err)
-			}
-			return ep
-		}()
-		t.Cleanup(ep.Close)
-		eps, err := newEndpointWithSocket(ep, wq, tcp.ProtocolNumber, ipv4.ProtocolNumber, ns)
-		if err != nil {
-			t.Fatal(err)
-		}
-		t.Cleanup(eps.close)
-		return eps
-	}
-	listener := createEP()
+	listener := createEP(t, ns, new(waiter.Queue))
 
 	if err := listener.ep.Bind(tcpip.FullAddress{}); err != nil {
 		t.Fatalf("ep.Bind({}) = %s", err)
@@ -534,7 +646,7 @@
 	if err != nil {
 		t.Fatalf("ep.GetLocalAddress() = %s", err)
 	}
-	client := createEP()
+	client := createEP(t, ns, new(waiter.Queue))
 
 	waitEntry, inCh := waiter.NewChannelEntry(nil)
 	listener.wq.EventRegister(&waitEntry, waiter.EventIn)
@@ -570,15 +682,10 @@
 	defer ticker.Stop()
 	// Wait and check for the client active close to reach FIN_WAIT2 state.
 	for {
-		select {
-		case <-ticker.C:
-			state := tcp.EndpointState(client.ep.State())
-			if state != tcp.StateFinWait2 {
-				runtime.Gosched()
-				continue
-			}
+		if tcp.EndpointState(client.ep.State()) == tcp.StateFinWait2 {
+			break
 		}
-		break
+		<-ticker.C
 	}
 
 	// Look up the client once more in the endpoints map; it should still not
@@ -598,16 +705,12 @@
 	// gVisor stack notifies EventHUp on entering TIME_WAIT. Wait for some time
 	// for the EventHUp to be processed by netstack.
 	for {
-		select {
-		case <-ticker.C:
-			// The client endpoint would be removed from the endpoints map as a result
-			// of processing EventHUp.
-			if _, ok := ns.endpoints.Load(client.endpoint.key); ok {
-				runtime.Gosched()
-				continue
-			}
+		// The client endpoint would be removed from the endpoints map as a result
+		// of processing EventHUp.
+		if _, ok := ns.endpoints.Load(client.endpoint.key); !ok {
+			break
 		}
-		break
+		<-ticker.C
 	}
 }
 
@@ -618,13 +721,13 @@
 	}
 
 	tcpipEP := func() (*waiter.Queue, tcpip.Endpoint) {
-		wq := &waiter.Queue{}
-		ep, err := ns.stack.NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, wq)
+		var wq waiter.Queue
+		ep, err := ns.stack.NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, &wq)
 		if err != nil {
-			t.Fatalf("NewEndpoint() = %s", err)
+			t.Fatalf("NewEndpoint(tcp.ProtocolNumber, ipv6.ProtocolNumber, _) = %s", err)
 		}
 		t.Cleanup(ep.Close)
-		return wq, ep
+		return &wq, ep
 	}
 	// Test if we always skip key value 0 while adding to the map.
 	for _, key := range []uint64{0, math.MaxUint64} {
diff --git a/src/connectivity/network/netstack/noop_endpoint_test.go b/src/connectivity/network/netstack/noop_endpoint_test.go
index 3a33a63..e3f5c0d 100644
--- a/src/connectivity/network/netstack/noop_endpoint_test.go
+++ b/src/connectivity/network/netstack/noop_endpoint_test.go
@@ -19,16 +19,17 @@
 var _ stack.LinkEndpoint = (*noopEndpoint)(nil)
 
 type noopEndpoint struct {
-	linkAddress tcpip.LinkAddress
-	attached    chan struct{}
+	capabilities stack.LinkEndpointCapabilities
+	linkAddress  tcpip.LinkAddress
+	attached     chan struct{}
 }
 
 func (*noopEndpoint) MTU() uint32 {
 	return 0
 }
 
-func (*noopEndpoint) Capabilities() stack.LinkEndpointCapabilities {
-	return 0
+func (ep *noopEndpoint) Capabilities() stack.LinkEndpointCapabilities {
+	return ep.capabilities
 }
 
 func (*noopEndpoint) MaxHeaderLength() uint16 {
diff --git a/src/connectivity/openthread/ot-stack/BUILD.gn b/src/connectivity/openthread/ot-stack/BUILD.gn
index ca379367..1d8f0d1 100644
--- a/src/connectivity/openthread/ot-stack/BUILD.gn
+++ b/src/connectivity/openthread/ot-stack/BUILD.gn
@@ -42,15 +42,15 @@
   ]
 
   include_dirs = [
-    "//third_party/openthread/src/core",
-    "//third_party/openthread/src/core/common",
-    "//third_party/openthread/src/core/radio",
-    "//third_party/openthread/include",
-    "//third_party/openthread/src",
-    "//third_party/openthread/src/ncp",
-    "//third_party/openthread/src/lib/spinel",
-    "//third_party/openthread/third_party/mbedtls",
-    "//third_party/openthread/third_party/mbedtls/repo/include",
+    "//third_party/openthread/openthread-tmp/src/core",
+    "//third_party/openthread/openthread-tmp/src/core/common",
+    "//third_party/openthread/openthread-tmp/src/core/radio",
+    "//third_party/openthread/openthread-tmp/include",
+    "//third_party/openthread/openthread-tmp/src",
+    "//third_party/openthread/openthread-tmp/src/ncp",
+    "//third_party/openthread/openthread-tmp/src/lib/spinel",
+    "//third_party/openthread/openthread-tmp/third_party/mbedtls",
+    "//third_party/openthread/openthread-tmp/third_party/mbedtls/repo/include",
     "//src/connectivity/openthread/third_party/openthread/platform",
   ]
 
diff --git a/src/connectivity/openthread/third_party/openthread/platform/BUILD.gn b/src/connectivity/openthread/third_party/openthread/platform/BUILD.gn
index 4aebf84..47db8a3 100644
--- a/src/connectivity/openthread/third_party/openthread/platform/BUILD.gn
+++ b/src/connectivity/openthread/third_party/openthread/platform/BUILD.gn
@@ -26,14 +26,14 @@
   ]
 
   include_dirs = [
-    "//third_party/openthread/src/core",
-    "//third_party/openthread/src/core/common",
-    "//third_party/openthread/src/core/radio",
-    "//third_party/openthread/include",
-    "//third_party/openthread/src",
-    "//third_party/openthread/src/lib/spinel",
-    "//third_party/openthread/third_party/mbedtls",
-    "//third_party/openthread/third_party/mbedtls/repo/include",
+    "//third_party/openthread/openthread-tmp/src/core",
+    "//third_party/openthread/openthread-tmp/src/core/common",
+    "//third_party/openthread/openthread-tmp/src/core/radio",
+    "//third_party/openthread/openthread-tmp/include",
+    "//third_party/openthread/openthread-tmp/src",
+    "//third_party/openthread/openthread-tmp/src/lib/spinel",
+    "//third_party/openthread/openthread-tmp/third_party/mbedtls",
+    "//third_party/openthread/openthread-tmp/third_party/mbedtls/repo/include",
   ]
 
   defines = [
@@ -54,7 +54,7 @@
     "//src/lib/fsl",
     "//src/lib/json_parser",
     "//third_party/modp_b64",
-    "//third_party/openthread:lib-ot-core",
+    "//third_party/openthread/openthread-tmp:lib-ot-core",
     "//zircon/public/lib/zx",
   ]
   configs += [ "//build/config:Wno-conversion" ]
diff --git a/src/connectivity/openthread/third_party/openthread/platform/tests/BUILD.gn b/src/connectivity/openthread/third_party/openthread/platform/tests/BUILD.gn
index c90d183..a559823 100644
--- a/src/connectivity/openthread/third_party/openthread/platform/tests/BUILD.gn
+++ b/src/connectivity/openthread/third_party/openthread/platform/tests/BUILD.gn
@@ -67,7 +67,7 @@
   ]
 
   include_dirs = [
-    "//third_party/openthread/include",
+    "//third_party/openthread/openthread-tmp/include",
     "//src/connectivity/openthread/third_party/openthread/platform",
   ]
 
diff --git a/src/connectivity/wlan/testing/ap-smoke-test/src/main.rs b/src/connectivity/wlan/testing/ap-smoke-test/src/main.rs
index 9ca57f3..25b1c815 100644
--- a/src/connectivity/wlan/testing/ap-smoke-test/src/main.rs
+++ b/src/connectivity/wlan/testing/ap-smoke-test/src/main.rs
@@ -183,6 +183,8 @@
                         &wlan_client_iface.sme_proxy,
                         target_ssid.to_vec(),
                         target_pwd.to_vec(),
+                        // TODO(fxbug.dev/66665): pass in a bss description or use Policy layer
+                        None,
                     )
                     .await;
 
diff --git a/src/connectivity/wlan/testing/client-smoke-test/src/main.rs b/src/connectivity/wlan/testing/client-smoke-test/src/main.rs
index 2a29cef..8705ca0 100644
--- a/src/connectivity/wlan/testing/client-smoke-test/src/main.rs
+++ b/src/connectivity/wlan/testing/client-smoke-test/src/main.rs
@@ -124,6 +124,8 @@
                     &wlan_iface.sme_proxy,
                     opt.target_ssid.as_bytes().to_vec(),
                     opt.target_pwd.as_bytes().to_vec(),
+                    // TODO(fxbug.dev/66665): pass in a bss description or use Policy layer
+                    None,
                 )
                 // TODO(fxbug.dev/29881): when this bug is fixed, consider removing this timeout
                 .on_timeout(WLAN_CONNECT_TIMEOUT_SECONDS.seconds().after_now(), || {
diff --git a/src/connectivity/wlan/testing/client-stress-test/src/main.rs b/src/connectivity/wlan/testing/client-stress-test/src/main.rs
index 19344e2..cc41c8c 100644
--- a/src/connectivity/wlan/testing/client-stress-test/src/main.rs
+++ b/src/connectivity/wlan/testing/client-stress-test/src/main.rs
@@ -117,6 +117,8 @@
                         &wlaniface.sme_proxy,
                         opt.target_ssid.as_bytes().to_vec(),
                         opt.target_pwd.as_bytes().to_vec(),
+                        // TODO(fxbug.dev/66665): pass in a bss description or use Policy layer
+                        None,
                     )
                     .await;
                     match result {
diff --git a/src/connectivity/wlan/testing/wlan-service-util/BUILD.gn b/src/connectivity/wlan/testing/wlan-service-util/BUILD.gn
index c6055b3..323d2a2 100644
--- a/src/connectivity/wlan/testing/wlan-service-util/BUILD.gn
+++ b/src/connectivity/wlan/testing/wlan-service-util/BUILD.gn
@@ -15,6 +15,7 @@
     "//sdk/fidl/fuchsia.wlan.common:fuchsia.wlan.common-rustc",
     "//sdk/fidl/fuchsia.wlan.device:fuchsia.wlan.device-rustc",
     "//sdk/fidl/fuchsia.wlan.device.service:fuchsia.wlan.device.service-rustc",
+    "//sdk/fidl/fuchsia.wlan.internal:fuchsia.wlan.internal-rustc",
     "//sdk/fidl/fuchsia.wlan.sme:fuchsia.wlan.sme-rustc",
     "//src/connectivity/wlan/lib/common/rust:wlan-common",
     "//src/lib/fidl/rust/fidl",
@@ -25,6 +26,7 @@
     "//third_party/rust_crates:futures",
     "//third_party/rust_crates:hex",
     "//third_party/rust_crates:pin-utils",
+    "//third_party/rust_crates:rand",
   ]
 
   sources = [
diff --git a/src/connectivity/wlan/testing/wlan-service-util/src/client.rs b/src/connectivity/wlan/testing/wlan-service-util/src/client.rs
index c404625..4ed3a19 100644
--- a/src/connectivity/wlan/testing/wlan-service-util/src/client.rs
+++ b/src/connectivity/wlan/testing/wlan-service-util/src/client.rs
@@ -7,6 +7,7 @@
 use fidl_fuchsia_wlan_common as fidl_common;
 use fidl_fuchsia_wlan_device::MacRole;
 use fidl_fuchsia_wlan_device_service::DeviceServiceProxy;
+use fidl_fuchsia_wlan_internal as fidl_internal;
 use fidl_fuchsia_wlan_sme as fidl_sme;
 use fuchsia_syslog::fx_log_err;
 use fuchsia_zircon as zx;
@@ -45,6 +46,7 @@
     iface_sme_proxy: &fidl_sme::ClientSmeProxy,
     target_ssid: Vec<u8>,
     target_pwd: Vec<u8>,
+    target_bss_desc: Option<Box<fidl_internal::BssDescription>>,
 ) -> Result<bool, Error> {
     let (connection_proxy, connection_remote) = endpoints::create_proxy()?;
     let target_ssid_clone = target_ssid.clone();
@@ -53,7 +55,7 @@
     let credential = credential_from_bytes(target_pwd)?;
     let mut req = fidl_sme::ConnectRequest {
         ssid: target_ssid,
-        bss_desc: None,
+        bss_desc: target_bss_desc,
         credential,
         radio_cfg: fidl_sme::RadioConfig {
             override_phy: false,
@@ -261,9 +263,39 @@
         futures::stream::{StreamExt, StreamFuture},
         futures::task::Poll,
         pin_utils::pin_mut,
+        rand::Rng as _,
+        std::convert::TryInto as _,
         wlan_common::assert_variant,
     };
 
+    fn generate_random_bss_desc() -> Option<Box<fidl_fuchsia_wlan_internal::BssDescription>> {
+        let mut rng = rand::thread_rng();
+        Some(Box::new(fidl_fuchsia_wlan_internal::BssDescription {
+            bssid: (0..6).map(|_| rng.gen::<u8>()).collect::<Vec<u8>>().try_into().unwrap(),
+            bss_type: fidl_fuchsia_wlan_internal::BssTypes::Personal,
+            beacon_period: rng.gen::<u16>(),
+            timestamp: rng.gen::<u64>(),
+            local_time: rng.gen::<u64>(),
+            cap: rng.gen::<u16>(),
+            ies: (0..1024).map(|_| rng.gen::<u8>()).collect(),
+            rssi_dbm: rng.gen::<i8>(),
+            chan: fidl_common::WlanChan {
+                primary: rng.gen::<u8>(),
+                cbw: match rng.gen_range(0, 5) {
+                    0 => fidl_common::Cbw::Cbw20,
+                    1 => fidl_common::Cbw::Cbw40,
+                    2 => fidl_common::Cbw::Cbw40Below,
+                    3 => fidl_common::Cbw::Cbw80,
+                    4 => fidl_common::Cbw::Cbw160,
+                    5 => fidl_common::Cbw::Cbw80P80,
+                    _ => panic!(),
+                },
+                secondary80: rng.gen::<u8>(),
+            },
+            snr_db: rng.gen::<i8>(),
+        }))
+    }
+
     fn extract_sme_server_from_get_client_sme_req_and_respond(
         exec: &mut Executor,
         req_stream: &mut DeviceServiceRequestStream,
@@ -775,8 +807,10 @@
 
         let target_ssid = ssid.as_bytes();
         let target_password = password.as_bytes();
+        let target_bss_desc = generate_random_bss_desc();
 
-        let fut = connect(&client_sme, target_ssid.to_vec(), target_password.to_vec());
+        let fut =
+            connect(&client_sme, target_ssid.to_vec(), target_password.to_vec(), target_bss_desc);
         pin_mut!(fut);
         assert!(exec.run_until_stalled(&mut fut).is_pending());
 
@@ -823,8 +857,14 @@
 
         let target_ssid = "TestAp".as_bytes();
         let target_password = "password".as_bytes();
+        let target_bss_desc = generate_random_bss_desc();
 
-        let fut = connect(&client_sme, target_ssid.to_vec(), target_password.to_vec());
+        let fut = connect(
+            &client_sme,
+            target_ssid.to_vec(),
+            target_password.to_vec(),
+            target_bss_desc.clone(),
+        );
         pin_mut!(fut);
         assert!(exec.run_until_stalled(&mut fut).is_pending());
 
@@ -834,6 +874,7 @@
             &mut next_client_sme_req,
             target_ssid,
             credential_from_bytes(target_password.to_vec()).expect("password should be valid"),
+            target_bss_desc,
         );
     }
 
@@ -845,8 +886,14 @@
 
         let target_ssid = "TestAp".as_bytes();
         let target_password = "".as_bytes();
+        let target_bss_desc = generate_random_bss_desc();
 
-        let fut = connect(&client_sme, target_ssid.to_vec(), target_password.to_vec());
+        let fut = connect(
+            &client_sme,
+            target_ssid.to_vec(),
+            target_password.to_vec(),
+            target_bss_desc.clone(),
+        );
         pin_mut!(fut);
         assert!(exec.run_until_stalled(&mut fut).is_pending());
 
@@ -856,6 +903,7 @@
             &mut next_client_sme_req,
             target_ssid,
             credential_from_bytes(vec![]).expect("password should be valid"),
+            target_bss_desc,
         );
     }
 
@@ -864,11 +912,13 @@
         server: &mut StreamFuture<ClientSmeRequestStream>,
         expected_ssid: &[u8],
         expected_credential: fidl_sme::Credential,
+        expected_bss_desc: Option<Box<fidl_internal::BssDescription>>,
     ) {
         match poll_client_sme_request(exec, server) {
             Poll::Ready(ClientSmeRequest::Connect { req, .. }) => {
                 assert_eq!(expected_ssid, &req.ssid[..]);
                 assert_eq_credentials(&req.credential, &expected_credential);
+                assert_eq!(req.bss_desc, expected_bss_desc);
             }
             _ => panic!("expected a Connect request"),
         }
diff --git a/src/developer/forensics/feedback_data/tests/metadata_unittest.cc b/src/developer/forensics/feedback_data/tests/metadata_unittest.cc
index 9e244f1..136d197 100644
--- a/src/developer/forensics/feedback_data/tests/metadata_unittest.cc
+++ b/src/developer/forensics/feedback_data/tests/metadata_unittest.cc
@@ -412,8 +412,8 @@
 
   const zx::duration utc_monotonic_difference(utc.get() - monotonic.get());
 
-  ASSERT_EQ(clock_.Now(&monotonic), ZX_OK);
-  ASSERT_EQ(clock_.Now(&utc), ZX_OK);
+  monotonic = clock_.Now();
+  ASSERT_EQ(clock_.UtcNow(&utc), ZX_OK);
 
   const auto metadata_json = MakeJsonReport(::fit::ok<Annotations>(std::move(annotations)),
                                             ::fit::ok<Attachments>(std::move(attachments)));
diff --git a/src/developer/forensics/utils/tests/utc_time_provider_unittest.cc b/src/developer/forensics/utils/tests/utc_time_provider_unittest.cc
index 13a05fc..b9545d7 100644
--- a/src/developer/forensics/utils/tests/utc_time_provider_unittest.cc
+++ b/src/developer/forensics/utils/tests/utc_time_provider_unittest.cc
@@ -74,11 +74,9 @@
   StartClock(zx::time(0));
   RunLoopUntilIdle();
 
-  zx::time monotonic;
+  zx::time monotonic = clock_.Now();
   zx::time_utc utc;
-
-  ASSERT_EQ(clock_.Now(&monotonic), ZX_OK);
-  ASSERT_EQ(clock_.Now(&utc), ZX_OK);
+  ASSERT_EQ(clock_.UtcNow(&utc), ZX_OK);
 
   const auto utc_monotonic_difference = utc_provider_->CurrentUtcMonotonicDifference();
   ASSERT_TRUE(utc_monotonic_difference.has_value());
diff --git a/src/developer/forensics/utils/time.cc b/src/developer/forensics/utils/time.cc
index 49a7266..b0cea9f 100644
--- a/src/developer/forensics/utils/time.cc
+++ b/src/developer/forensics/utils/time.cc
@@ -37,7 +37,7 @@
 
 std::optional<zx::time_utc> CurrentUtcTimeRaw(timekeeper::Clock* clock) {
   zx::time_utc now_utc;
-  if (const zx_status_t status = clock->Now(&now_utc); status != ZX_OK) {
+  if (const zx_status_t status = clock->UtcNow(&now_utc); status != ZX_OK) {
     return std::nullopt;
   }
 
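Note: the forensics hunks above change callers from `clock->Now(&utc)` to `clock->UtcNow(&utc)` and read monotonic time as a plain return value. A minimal sketch of that calling convention, with the signatures inferred only from these call sites rather than from the `timekeeper` headers:

```cpp
// Sketch only: compute the UTC/monotonic difference using the calling
// convention shown in the hunks above (Now() by value, UtcNow() by status).
#include <lib/zx/time.h>
#include <zircon/types.h>

#include <optional>

template <typename Clock>
std::optional<zx::duration> UtcMonotonicDifference(Clock* clock) {
  const zx::time monotonic = clock->Now();  // monotonic time, returned by value
  zx::time_utc utc;
  if (const zx_status_t status = clock->UtcNow(&utc); status != ZX_OK) {
    return std::nullopt;  // UTC has not been set yet
  }
  return zx::duration(utc.get() - monotonic.get());
}
```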
diff --git a/src/developer/system_monitor/bin/harvester/BUILD.gn b/src/developer/system_monitor/bin/harvester/BUILD.gn
index 0d9530b..2b5370c9 100644
--- a/src/developer/system_monitor/bin/harvester/BUILD.gn
+++ b/src/developer/system_monitor/bin/harvester/BUILD.gn
@@ -49,11 +49,11 @@
     "gather_vmos.h",
     "harvester.cc",
     "harvester.h",
+    "info_resource.cc",
+    "info_resource.h",
     "log_listener.cc",
     "log_listener.h",
     "os.h",
-    "root_resource.cc",
-    "root_resource.h",
     "sample_bundle.cc",
     "sample_bundle.h",
     "task_tree.cc",
@@ -62,8 +62,8 @@
   ]
 
   public_deps = [
-    "//sdk/fidl/fuchsia.boot:fuchsia.boot_c",
     "//sdk/fidl/fuchsia.diagnostics",
+    "//sdk/fidl/fuchsia.kernel",
     "//sdk/fidl/fuchsia.kernel:fuchsia.kernel_c",
     "//sdk/fidl/fuchsia.sysinfo",
     "//sdk/lib/fdio",
@@ -81,6 +81,7 @@
     "//zircon/system/ulib/task-utils",
     "//zircon/system/ulib/trace",
     "//zircon/system/ulib/trace-provider:trace-provider-with-fdio",
+    "//zircon/system/ulib/zircon:zircon-headers",
   ]
 
   # TODO(fxb/58162): delete the below and fix compiler warnings
@@ -142,8 +143,8 @@
     "gather_vmos_test.cc",
     "harvester_fake.h",
     "harvester_test.cc",
+    "info_resource_test.cc",
     "mock_dockyard_stub.h",
-    "root_resource_test.cc",
     "sample_bundle_test.cc",
     "task_tree_test.cc",
     "union_find_test.cc",
diff --git a/src/developer/system_monitor/bin/harvester/gather_category.h b/src/developer/system_monitor/bin/harvester/gather_category.h
index 19478b9..35fd9a5 100644
--- a/src/developer/system_monitor/bin/harvester/gather_category.h
+++ b/src/developer/system_monitor/bin/harvester/gather_category.h
@@ -36,9 +36,9 @@
 // manageable and enabling/disabling categories.
 class GatherCategory {
  public:
-  GatherCategory(zx_handle_t root_resource,
+  GatherCategory(zx_handle_t info_resource,
                  harvester::DockyardProxy* dockyard_proxy)
-      : root_resource_(root_resource), dockyard_proxy_(dockyard_proxy) {}
+      : info_resource_(info_resource), dockyard_proxy_(dockyard_proxy) {}
   virtual ~GatherCategory() = default;
 
   // The dockyard proxy is used to send data to the remote Dockyard.
@@ -51,8 +51,8 @@
   // Override this in a derived class to gather sample data.
   virtual void Gather() = 0;
 
-  // Get the root resource of the job/process/thread tree.
-  zx_handle_t RootResource() { return root_resource_; }
+  // Get the info resource of the job/process/thread tree.
+  zx_handle_t InfoResource() const { return info_resource_; }
 
   // Set (or reset) the time this task will run on |dispatcher|.
   // |Gather()| will be called at (or after) |start| and then every multiple of
@@ -67,7 +67,7 @@
  private:
   async::TaskMethod<GatherCategory, &GatherCategory::TaskHandler> task_method_{
       this};
-  zx_handle_t root_resource_;
+  zx_handle_t info_resource_;
   harvester::DockyardProxy* dockyard_proxy_;
 
   zx::duration update_period_;
diff --git a/src/developer/system_monitor/bin/harvester/gather_channels.h b/src/developer/system_monitor/bin/harvester/gather_channels.h
index d8d40a44..90096ad 100644
--- a/src/developer/system_monitor/bin/harvester/gather_channels.h
+++ b/src/developer/system_monitor/bin/harvester/gather_channels.h
@@ -15,9 +15,9 @@
 // Gather Samples for jobs, processes, and threads.
 class GatherChannels : public GatherCategory {
  public:
-  GatherChannels(zx_handle_t root_resource,
+  GatherChannels(zx_handle_t info_resource,
                  harvester::DockyardProxy* dockyard_proxy)
-      : GatherCategory(root_resource, dockyard_proxy) {}
+      : GatherCategory(info_resource, dockyard_proxy) {}
 
   // GatherCategory.
   void Gather() override;
diff --git a/src/developer/system_monitor/bin/harvester/gather_channels_test.cc b/src/developer/system_monitor/bin/harvester/gather_channels_test.cc
index f5587ec..fe31e6e 100644
--- a/src/developer/system_monitor/bin/harvester/gather_channels_test.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_channels_test.cc
@@ -7,7 +7,7 @@
 #include <gtest/gtest.h>
 
 #include "dockyard_proxy_fake.h"
-#include "root_resource.h"
+#include "info_resource.h"
 
 namespace {
 
@@ -26,10 +26,10 @@
 };
 
 TEST_F(GatherChannelsTest, SmokeTest) {
-  zx_handle_t root_resource;
-  ASSERT_EQ(harvester::GetRootResource(&root_resource), ZX_OK);
+  zx_handle_t info_resource;
+  ASSERT_EQ(harvester::GetInfoResource(&info_resource), ZX_OK);
   harvester::DockyardProxyFake dockyard_proxy;
-  harvester::GatherChannels gatherer(root_resource, &dockyard_proxy);
+  harvester::GatherChannels gatherer(info_resource, &dockyard_proxy);
   harvester::g_slow_data_task_tree.Gather();
   gatherer.Gather();
   // Verify that something is being sent.
@@ -43,10 +43,10 @@
 }
 
 TEST_F(GatherChannelsTest, ProcessesAndPeers) {
-  zx_handle_t root_resource;
-  ASSERT_EQ(harvester::GetRootResource(&root_resource), ZX_OK);
+  zx_handle_t info_resource;
+  ASSERT_EQ(harvester::GetInfoResource(&info_resource), ZX_OK);
   harvester::DockyardProxyFake dockyard_proxy;
-  harvester::GatherChannels gatherer(root_resource, &dockyard_proxy);
+  harvester::GatherChannels gatherer(info_resource, &dockyard_proxy);
   harvester::g_slow_data_task_tree.Gather();
   gatherer.Gather();
 
diff --git a/src/developer/system_monitor/bin/harvester/gather_cpu.cc b/src/developer/system_monitor/bin/harvester/gather_cpu.cc
index c7a88a8..12c6b80 100644
--- a/src/developer/system_monitor/bin/harvester/gather_cpu.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_cpu.cc
@@ -24,11 +24,11 @@
 
 }  // namespace
 
-void AddGlobalCpuSamples(SampleBundle* samples, zx_handle_t root_resource) {
+void AddGlobalCpuSamples(SampleBundle* samples, zx_handle_t info_resource) {
   // TODO(fxbug.dev/34): Determine the array size at runtime (32 is arbitrary).
   zx_info_cpu_stats_t stats[32];
   size_t actual, avail;
-  zx_status_t err = zx_object_get_info(root_resource, ZX_INFO_CPU_STATS, &stats,
+  zx_status_t err = zx_object_get_info(info_resource, ZX_INFO_CPU_STATS, &stats,
                                        sizeof(stats), &actual, &avail);
   if (err != ZX_OK) {
     FX_LOGS(ERROR) << ZxErrorString("ZX_INFO_CPU_STATS", err);
@@ -68,7 +68,7 @@
   const std::string CPU_COUNT = "cpu:count";
   zx_info_cpu_stats_t stats[1];
   size_t actual, avail;
-  zx_status_t err = zx_object_get_info(RootResource(), ZX_INFO_CPU_STATS,
+  zx_status_t err = zx_object_get_info(InfoResource(), ZX_INFO_CPU_STATS,
                                        &stats, sizeof(stats), &actual, &avail);
   if (err != ZX_OK) {
     FX_LOGS(ERROR) << ZxErrorString("ZX_INFO_CPU_STATS", err);
@@ -85,7 +85,7 @@
 
 void GatherCpu::Gather() {
   SampleBundle samples;
-  AddGlobalCpuSamples(&samples, RootResource());
+  AddGlobalCpuSamples(&samples, InfoResource());
   samples.Upload(DockyardPtr());
 }
 
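Note: the `gather_cpu.cc` hunk above keeps the existing TODO (fxbug.dev/34) about the hard-coded 32-entry stats array. Below is a hedged sketch of the usual two-pass `zx_object_get_info()` pattern that TODO alludes to, sizing the buffer from the reported `avail` count; it assumes `ZX_INFO_CPU_STATS` tolerates a zero-length first query, as array-style info topics generally do.

```cpp
// Sketch only: size the per-CPU stats buffer at runtime instead of using a
// fixed 32-entry array. The first query learns how many records are
// available; the second fetches them via the info resource handle.
#include <zircon/syscalls.h>
#include <zircon/syscalls/object.h>

#include <vector>

zx_status_t GetCpuStats(zx_handle_t info_resource,
                        std::vector<zx_info_cpu_stats_t>* out) {
  size_t actual = 0;
  size_t avail = 0;
  zx_status_t status = zx_object_get_info(info_resource, ZX_INFO_CPU_STATS,
                                          /*buffer=*/nullptr, /*buffer_size=*/0,
                                          &actual, &avail);
  if (status != ZX_OK) {
    return status;
  }
  out->resize(avail);
  return zx_object_get_info(info_resource, ZX_INFO_CPU_STATS, out->data(),
                            out->size() * sizeof(zx_info_cpu_stats_t),
                            &actual, &avail);
}
```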
diff --git a/src/developer/system_monitor/bin/harvester/gather_cpu.h b/src/developer/system_monitor/bin/harvester/gather_cpu.h
index 660a5e8..56ca4f1 100644
--- a/src/developer/system_monitor/bin/harvester/gather_cpu.h
+++ b/src/developer/system_monitor/bin/harvester/gather_cpu.h
@@ -2,20 +2,20 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_CPU_H_
-#define GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_CPU_H_
+#ifndef SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_CPU_H_
+#define SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_CPU_H_
 
 #include "gather_category.h"
 
 namespace harvester {
 
 class SampleBundle;
-void AddGlobalCpuSamples(SampleBundle* samples, zx_handle_t root_resource);
+void AddGlobalCpuSamples(SampleBundle* samples, zx_handle_t info_resource);
 
 class GatherCpu : public GatherCategory {
  public:
-  GatherCpu(zx_handle_t root_resource, harvester::DockyardProxy* dockyard_proxy)
-      : GatherCategory(root_resource, dockyard_proxy) {}
+  GatherCpu(zx_handle_t info_resource, harvester::DockyardProxy* dockyard_proxy)
+      : GatherCategory(info_resource, dockyard_proxy) {}
 
   // GatherCategory.
   void GatherDeviceProperties() override;
@@ -24,4 +24,4 @@
 
 }  // namespace harvester
 
-#endif  // GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_CPU_H_
+#endif  // SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_CPU_H_
diff --git a/src/developer/system_monitor/bin/harvester/gather_cpu_test.cc b/src/developer/system_monitor/bin/harvester/gather_cpu_test.cc
index 65825c0..f1c886a 100644
--- a/src/developer/system_monitor/bin/harvester/gather_cpu_test.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_cpu_test.cc
@@ -7,17 +7,17 @@
 #include <gtest/gtest.h>
 
 #include "dockyard_proxy_fake.h"
-#include "root_resource.h"
+#include "info_resource.h"
 
 class GatherCpuTest : public ::testing::Test {};
 
 TEST_F(GatherCpuTest, CheckValues) {
-  zx_handle_t root_resource;
-  zx_status_t ret = harvester::GetRootResource(&root_resource);
+  zx_handle_t info_resource;
+  zx_status_t ret = harvester::GetInfoResource(&info_resource);
   ASSERT_EQ(ret, ZX_OK);
   harvester::DockyardProxyFake dockyard_proxy;
 
-  harvester::GatherCpu gatherer(root_resource, &dockyard_proxy);
+  harvester::GatherCpu gatherer(info_resource, &dockyard_proxy);
   uint64_t test_value;
 
   gatherer.GatherDeviceProperties();
diff --git a/src/developer/system_monitor/bin/harvester/gather_device_info.h b/src/developer/system_monitor/bin/harvester/gather_device_info.h
index 57749fd..a24cb8d 100644
--- a/src/developer/system_monitor/bin/harvester/gather_device_info.h
+++ b/src/developer/system_monitor/bin/harvester/gather_device_info.h
@@ -18,15 +18,15 @@
 // Collect static information about the current device.
 class GatherDeviceInfo : public GatherCategory {
  public:
-  GatherDeviceInfo(zx_handle_t root_resource,
+  GatherDeviceInfo(zx_handle_t info_resource,
                    harvester::DockyardProxy* dockyard_proxy)
-      : GatherDeviceInfo(root_resource, dockyard_proxy,
+      : GatherDeviceInfo(info_resource, dockyard_proxy,
                          std::make_unique<AnnotationsProvider>()) {}
 
-  GatherDeviceInfo(zx_handle_t root_resource,
+  GatherDeviceInfo(zx_handle_t info_resource,
                    harvester::DockyardProxy* dockyard_proxy,
                    std::unique_ptr<AnnotationsProvider> annotations_provider)
-      : GatherCategory(root_resource, dockyard_proxy),
+      : GatherCategory(info_resource, dockyard_proxy),
         annotations_provider_(std::move(annotations_provider)) {}
 
   // GatherCategory.
diff --git a/src/developer/system_monitor/bin/harvester/gather_device_info_test.cc b/src/developer/system_monitor/bin/harvester/gather_device_info_test.cc
index 447e8ee..3485457 100644
--- a/src/developer/system_monitor/bin/harvester/gather_device_info_test.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_device_info_test.cc
@@ -8,7 +8,7 @@
 
 #include "build_info.h"
 #include "dockyard_proxy_fake.h"
-#include "root_resource.h"
+#include "info_resource.h"
 
 class GatherDeviceInfoTest : public ::testing::Test {};
 
@@ -48,8 +48,8 @@
 };
 
 TEST_F(GatherDeviceInfoTest, WithAllExpectedValues) {
-  zx_handle_t root_resource;
-  zx_status_t ret = harvester::GetRootResource(&root_resource);
+  zx_handle_t info_resource;
+  zx_status_t ret = harvester::GetInfoResource(&info_resource);
   ASSERT_EQ(ret, ZX_OK);
   harvester::DockyardProxyFake dockyard_proxy;
   std::unique_ptr<FakeAnnotationsProvider> annotations_provider =
@@ -59,7 +59,7 @@
   annotations_provider->SetAnnotation("build.product", "some-product");
   annotations_provider->SetAnnotation("device.board-name", "device-board-name");
 
-  harvester::GatherDeviceInfo gatherer(root_resource, &dockyard_proxy,
+  harvester::GatherDeviceInfo gatherer(info_resource, &dockyard_proxy,
                                        std::move(annotations_provider));
 
   gatherer.GatherDeviceProperties();
@@ -77,8 +77,8 @@
 }
 
 TEST_F(GatherDeviceInfoTest, HandlesSomeMissingExpectedValues) {
-  zx_handle_t root_resource;
-  zx_status_t ret = harvester::GetRootResource(&root_resource);
+  zx_handle_t info_resource;
+  zx_status_t ret = harvester::GetInfoResource(&info_resource);
   ASSERT_EQ(ret, ZX_OK);
   harvester::DockyardProxyFake dockyard_proxy;
   std::unique_ptr<FakeAnnotationsProvider> annotations_provider =
@@ -88,7 +88,7 @@
   annotations_provider->SetAnnotation("device.NOT-board-name",
                                       "NOT-device-board-name");
 
-  harvester::GatherDeviceInfo gatherer(root_resource, &dockyard_proxy,
+  harvester::GatherDeviceInfo gatherer(info_resource, &dockyard_proxy,
                                        std::move(annotations_provider));
 
   gatherer.GatherDeviceProperties();
@@ -104,14 +104,14 @@
 }
 
 TEST_F(GatherDeviceInfoTest, HandlesAllMissingExpectedValues) {
-  zx_handle_t root_resource;
-  zx_status_t ret = harvester::GetRootResource(&root_resource);
+  zx_handle_t info_resource;
+  zx_status_t ret = harvester::GetInfoResource(&info_resource);
   ASSERT_EQ(ret, ZX_OK);
   harvester::DockyardProxyFake dockyard_proxy;
   std::unique_ptr<FakeAnnotationsProvider> annotations_provider =
       std::make_unique<FakeAnnotationsProvider>();
 
-  harvester::GatherDeviceInfo gatherer(root_resource, &dockyard_proxy,
+  harvester::GatherDeviceInfo gatherer(info_resource, &dockyard_proxy,
                                        std::move(annotations_provider));
 
   gatherer.GatherDeviceProperties();
@@ -126,14 +126,14 @@
 }
 
 TEST_F(GatherDeviceInfoTest, GatherGetsUptime) {
-  zx_handle_t root_resource;
-  zx_status_t ret = harvester::GetRootResource(&root_resource);
+  zx_handle_t info_resource;
+  zx_status_t ret = harvester::GetInfoResource(&info_resource);
   ASSERT_EQ(ret, ZX_OK);
   harvester::DockyardProxyFake dockyard_proxy;
   std::unique_ptr<FakeAnnotationsProvider> annotations_provider =
       std::make_unique<FakeAnnotationsProvider>();
 
-  harvester::GatherDeviceInfo gatherer(root_resource, &dockyard_proxy,
+  harvester::GatherDeviceInfo gatherer(info_resource, &dockyard_proxy,
                                        std::move(annotations_provider));
 
   gatherer.Gather();
diff --git a/src/developer/system_monitor/bin/harvester/gather_inspectable.h b/src/developer/system_monitor/bin/harvester/gather_inspectable.h
index 997b477..ed3dc5a 100644
--- a/src/developer/system_monitor/bin/harvester/gather_inspectable.h
+++ b/src/developer/system_monitor/bin/harvester/gather_inspectable.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_INSPECTABLE_H_
-#define GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_INSPECTABLE_H_
+#ifndef SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_INSPECTABLE_H_
+#define SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_INSPECTABLE_H_
 
 #include "gather_category.h"
 
@@ -12,9 +12,9 @@
 // Collect a list of components that have inspect data.
 class GatherInspectable : public GatherCategory {
  public:
-  GatherInspectable(zx_handle_t root_resource,
+  GatherInspectable(zx_handle_t info_resource,
                     harvester::DockyardProxy* dockyard_proxy)
-      : GatherCategory(root_resource, dockyard_proxy) {}
+      : GatherCategory(info_resource, dockyard_proxy) {}
 
   // GatherCategory.
   void Gather() override;
@@ -22,4 +22,4 @@
 
 }  // namespace harvester
 
-#endif  // GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_INSPECTABLE_H_
+#endif  // SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_INSPECTABLE_H_
diff --git a/src/developer/system_monitor/bin/harvester/gather_inspectable_test.cc b/src/developer/system_monitor/bin/harvester/gather_inspectable_test.cc
index 94c18b4..1706106 100644
--- a/src/developer/system_monitor/bin/harvester/gather_inspectable_test.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_inspectable_test.cc
@@ -14,10 +14,10 @@
 };
 
 TEST_F(GatherInspectableTest, DISABLED_Inspectable) {
-  zx_handle_t root_resource = 0;
+  zx_handle_t info_resource = 0;
   harvester::DockyardProxyFake dockyard_proxy;
 
-  harvester::GatherInspectable gatherer(root_resource, &dockyard_proxy);
+  harvester::GatherInspectable gatherer(info_resource, &dockyard_proxy);
   gatherer.Gather();
   std::string test_string;
   EXPECT_TRUE(dockyard_proxy.CheckStringPrefixSent(
diff --git a/src/developer/system_monitor/bin/harvester/gather_introspection.h b/src/developer/system_monitor/bin/harvester/gather_introspection.h
index ca6a4c0..46d1de4 100644
--- a/src/developer/system_monitor/bin/harvester/gather_introspection.h
+++ b/src/developer/system_monitor/bin/harvester/gather_introspection.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_INTROSPECTION_H_
-#define GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_INTROSPECTION_H_
+#ifndef SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_INTROSPECTION_H_
+#define SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_INTROSPECTION_H_
 
 #include "gather_category.h"
 
@@ -12,9 +12,9 @@
 // Gather inspect information for components.
 class GatherIntrospection : public GatherCategory {
  public:
-  GatherIntrospection(zx_handle_t root_resource,
+  GatherIntrospection(zx_handle_t info_resource,
                       harvester::DockyardProxy* dockyard_proxy)
-      : GatherCategory(root_resource, dockyard_proxy) {}
+      : GatherCategory(info_resource, dockyard_proxy) {}
 
   // GatherCategory.
   void Gather() override;
@@ -22,4 +22,4 @@
 
 }  // namespace harvester
 
-#endif  // GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_INTROSPECTION_H_
+#endif  // SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_INTROSPECTION_H_
diff --git a/src/developer/system_monitor/bin/harvester/gather_introspection_test.cc b/src/developer/system_monitor/bin/harvester/gather_introspection_test.cc
index ca4cf7c..d54ba1e 100644
--- a/src/developer/system_monitor/bin/harvester/gather_introspection_test.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_introspection_test.cc
@@ -14,10 +14,10 @@
 };
 
 TEST_F(GatherIntrospectionTest, Introspection) {
-  zx_handle_t root_resource = 0;
+  zx_handle_t info_resource = 0;
   harvester::DockyardProxyFake dockyard_proxy;
 
-  harvester::GatherIntrospection gatherer(root_resource, &dockyard_proxy);
+  harvester::GatherIntrospection gatherer(info_resource, &dockyard_proxy);
   gatherer.Gather();
   std::string test_string;
   EXPECT_TRUE(dockyard_proxy.CheckJsonSent("inspect:/hub/fake/234/faux.Inspect",
diff --git a/src/developer/system_monitor/bin/harvester/gather_memory.cc b/src/developer/system_monitor/bin/harvester/gather_memory.cc
index ef94fb3..bbd0c70 100644
--- a/src/developer/system_monitor/bin/harvester/gather_memory.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_memory.cc
@@ -12,9 +12,9 @@
 
 namespace harvester {
 
-void AddGlobalMemorySamples(SampleBundle* samples, zx_handle_t root_resource) {
+void AddGlobalMemorySamples(SampleBundle* samples, zx_handle_t info_resource) {
   zx_info_kmem_stats_t stats;
-  zx_status_t err = zx_object_get_info(root_resource, ZX_INFO_KMEM_STATS,
+  zx_status_t err = zx_object_get_info(info_resource, ZX_INFO_KMEM_STATS,
                                        &stats, sizeof(stats),
                                        /*actual=*/nullptr, /*avail=*/nullptr);
   if (err != ZX_OK) {
@@ -55,7 +55,7 @@
 void GatherMemory::GatherDeviceProperties() {
   const std::string DEVICE_TOTAL = "memory:device_total_bytes";
   zx_info_kmem_stats_t stats;
-  zx_status_t err = zx_object_get_info(RootResource(), ZX_INFO_KMEM_STATS,
+  zx_status_t err = zx_object_get_info(InfoResource(), ZX_INFO_KMEM_STATS,
                                        &stats, sizeof(stats),
                                        /*actual=*/nullptr, /*avail=*/nullptr);
   if (err != ZX_OK) {
@@ -73,7 +73,7 @@
 
 void GatherMemory::Gather() {
   SampleBundle samples;
-  AddGlobalMemorySamples(&samples, RootResource());
+  AddGlobalMemorySamples(&samples, InfoResource());
   samples.Upload(DockyardPtr());
 }
 
diff --git a/src/developer/system_monitor/bin/harvester/gather_memory.h b/src/developer/system_monitor/bin/harvester/gather_memory.h
index 4cc7c0b..c4091bd 100644
--- a/src/developer/system_monitor/bin/harvester/gather_memory.h
+++ b/src/developer/system_monitor/bin/harvester/gather_memory.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_MEMORY_H_
-#define GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_MEMORY_H_
+#ifndef SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_MEMORY_H_
+#define SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_MEMORY_H_
 
 #include "gather_category.h"
 
@@ -11,14 +11,14 @@
 
 class SampleBundle;
 
-void AddGlobalMemorySamples(SampleBundle* samples, zx_handle_t root_resource);
+void AddGlobalMemorySamples(SampleBundle* samples, zx_handle_t info_resource);
 
 // Gather high level memory information from the kernel.
 class GatherMemory : public GatherCategory {
  public:
-  GatherMemory(zx_handle_t root_resource,
+  GatherMemory(zx_handle_t info_resource,
                harvester::DockyardProxy* dockyard_proxy)
-      : GatherCategory(root_resource, dockyard_proxy) {}
+      : GatherCategory(info_resource, dockyard_proxy) {}
 
   // GatherCategory.
   void GatherDeviceProperties() override;
@@ -27,4 +27,4 @@
 
 }  // namespace harvester
 
-#endif  // GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_MEMORY_H_
+#endif  // SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_MEMORY_H_
diff --git a/src/developer/system_monitor/bin/harvester/gather_memory_digest.h b/src/developer/system_monitor/bin/harvester/gather_memory_digest.h
index 6060dd2..e8f7e64 100644
--- a/src/developer/system_monitor/bin/harvester/gather_memory_digest.h
+++ b/src/developer/system_monitor/bin/harvester/gather_memory_digest.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_MEMORY_DIGEST_H_
-#define GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_MEMORY_DIGEST_H_
+#ifndef SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_MEMORY_DIGEST_H_
+#define SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_MEMORY_DIGEST_H_
 
 #include "gather_category.h"
 #include "src/developer/memory/metrics/digest.h"
@@ -15,9 +15,9 @@
 // categories. I.e. it creates a digest of the memory usage.
 class GatherMemoryDigest : public GatherCategory {
  public:
-  GatherMemoryDigest(zx_handle_t root_resource,
+  GatherMemoryDigest(zx_handle_t info_resource,
                      harvester::DockyardProxy* dockyard_proxy)
-      : GatherCategory(root_resource, dockyard_proxy) {}
+      : GatherCategory(info_resource, dockyard_proxy) {}
 
   // GatherCategory.
   void Gather() override;
@@ -29,4 +29,4 @@
 
 }  // namespace harvester
 
-#endif  // GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_MEMORY_DIGEST_H_
+#endif  // SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_MEMORY_DIGEST_H_
diff --git a/src/developer/system_monitor/bin/harvester/gather_memory_digest_test.cc b/src/developer/system_monitor/bin/harvester/gather_memory_digest_test.cc
index d6b2213..991c12a 100644
--- a/src/developer/system_monitor/bin/harvester/gather_memory_digest_test.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_memory_digest_test.cc
@@ -7,21 +7,21 @@
 #include <gtest/gtest.h>
 
 #include "dockyard_proxy_fake.h"
-#include "root_resource.h"
+#include "info_resource.h"
 
 namespace harvester {
-  const std::map<std::string, std::string>& GetBucketMap();
+const std::map<std::string, std::string>& GetBucketMap();
 }  // namespace harvester.
 
 class GatherMemoryDigestTest : public ::testing::Test {};
 
 TEST_F(GatherMemoryDigestTest, Inspectable) {
-  zx_handle_t root_resource;
-  zx_status_t ret = harvester::GetRootResource(&root_resource);
+  zx_handle_t info_resource;
+  zx_status_t ret = harvester::GetInfoResource(&info_resource);
   ASSERT_EQ(ret, ZX_OK);
   harvester::DockyardProxyFake dockyard_proxy;
 
-  harvester::GatherMemoryDigest gatherer(root_resource, &dockyard_proxy);
+  harvester::GatherMemoryDigest gatherer(info_resource, &dockyard_proxy);
 
   // No samples are currently gathered in this call. Test that this is
   // available to call and doesn't crash.
diff --git a/src/developer/system_monitor/bin/harvester/gather_memory_test.cc b/src/developer/system_monitor/bin/harvester/gather_memory_test.cc
index 650a73ae..3637437 100644
--- a/src/developer/system_monitor/bin/harvester/gather_memory_test.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_memory_test.cc
@@ -7,17 +7,17 @@
 #include <gtest/gtest.h>
 
 #include "dockyard_proxy_fake.h"
-#include "root_resource.h"
+#include "info_resource.h"
 
 class GatherMemoryTest : public ::testing::Test {};
 
 TEST_F(GatherMemoryTest, Inspectable) {
-  zx_handle_t root_resource;
-  zx_status_t ret = harvester::GetRootResource(&root_resource);
+  zx_handle_t info_resource;
+  zx_status_t ret = harvester::GetInfoResource(&info_resource);
   ASSERT_EQ(ret, ZX_OK);
   harvester::DockyardProxyFake dockyard_proxy;
 
-  harvester::GatherMemory gatherer(root_resource, &dockyard_proxy);
+  harvester::GatherMemory gatherer(info_resource, &dockyard_proxy);
   uint64_t test_value;
 
   gatherer.GatherDeviceProperties();
diff --git a/src/developer/system_monitor/bin/harvester/gather_processes_and_memory.cc b/src/developer/system_monitor/bin/harvester/gather_processes_and_memory.cc
index 0b81166..eaa90be 100644
--- a/src/developer/system_monitor/bin/harvester/gather_processes_and_memory.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_processes_and_memory.cc
@@ -16,8 +16,8 @@
 namespace harvester {
 
 GatherProcessesAndMemory::GatherProcessesAndMemory(
-    zx_handle_t root_resource, harvester::DockyardProxy* dockyard_proxy)
-    : GatherCategory(root_resource, dockyard_proxy) {}
+    zx_handle_t info_resource, harvester::DockyardProxy* dockyard_proxy)
+    : GatherCategory(info_resource, dockyard_proxy) {}
 
 void GatherProcessesAndMemory::Gather() {
   SampleBundle samples;
@@ -30,7 +30,7 @@
     AddTaskBasics(&samples, task_tree->Threads(), dockyard::KoidType::THREAD);
   }
   AddProcessStats(&samples, task_tree->Processes());
-  AddGlobalMemorySamples(&samples, RootResource());
+  AddGlobalMemorySamples(&samples, InfoResource());
   samples.Upload(DockyardPtr());
   actions_.NextInterval();
 }
diff --git a/src/developer/system_monitor/bin/harvester/gather_processes_and_memory.h b/src/developer/system_monitor/bin/harvester/gather_processes_and_memory.h
index 178cbec..a1016b40 100644
--- a/src/developer/system_monitor/bin/harvester/gather_processes_and_memory.h
+++ b/src/developer/system_monitor/bin/harvester/gather_processes_and_memory.h
@@ -30,7 +30,7 @@
 // Gather samples for process and global memory stats.
 class GatherProcessesAndMemory : public GatherCategory {
  public:
-  GatherProcessesAndMemory(zx_handle_t root_resource,
+  GatherProcessesAndMemory(zx_handle_t info_resource,
                            harvester::DockyardProxy* dockyard_proxy);
 
   // GatherCategory.
diff --git a/src/developer/system_monitor/bin/harvester/gather_processes_and_memory_test.cc b/src/developer/system_monitor/bin/harvester/gather_processes_and_memory_test.cc
index f1d1935..49c00b2 100644
--- a/src/developer/system_monitor/bin/harvester/gather_processes_and_memory_test.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_processes_and_memory_test.cc
@@ -9,7 +9,7 @@
 #include <gtest/gtest.h>
 
 #include "dockyard_proxy_fake.h"
-#include "root_resource.h"
+#include "info_resource.h"
 
 class GatherProcessesAndMemoryTest : public ::testing::Test {
  public:
@@ -35,10 +35,10 @@
 };
 
 TEST_F(GatherProcessesAndMemoryTest, MemoryStats) {
-  zx_handle_t root_resource;
-  ASSERT_EQ(harvester::GetRootResource(&root_resource), ZX_OK);
+  zx_handle_t info_resource;
+  ASSERT_EQ(harvester::GetInfoResource(&info_resource), ZX_OK);
   harvester::DockyardProxyFake dockyard_proxy;
-  harvester::GatherProcessesAndMemory gatherer(root_resource, &dockyard_proxy);
+  harvester::GatherProcessesAndMemory gatherer(info_resource, &dockyard_proxy);
   gatherer.Gather();
 
   std::string test_string;
diff --git a/src/developer/system_monitor/bin/harvester/gather_tasks.h b/src/developer/system_monitor/bin/harvester/gather_tasks.h
index 5239917..8e3e1fe 100644
--- a/src/developer/system_monitor/bin/harvester/gather_tasks.h
+++ b/src/developer/system_monitor/bin/harvester/gather_tasks.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_TASKS_H_
-#define GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_TASKS_H_
+#ifndef SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_TASKS_H_
+#define SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_TASKS_H_
 
 #include "gather_category.h"
 #include "task_tree.h"
@@ -25,9 +25,9 @@
 // Gather Samples for jobs, processes, and threads.
 class GatherTasks : public GatherCategory {
  public:
-  GatherTasks(zx_handle_t root_resource,
+  GatherTasks(zx_handle_t info_resource,
               harvester::DockyardProxy* dockyard_proxy)
-      : GatherCategory(root_resource, dockyard_proxy) {}
+      : GatherCategory(info_resource, dockyard_proxy) {}
 
   // GatherCategory.
   void Gather() override;
@@ -35,4 +35,4 @@
 
 }  // namespace harvester
 
-#endif  // GARNET_BIN_SYSTEM_MONITOR_HARVESTER_GATHER_TASKS_H_
+#endif  // SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_TASKS_H_
diff --git a/src/developer/system_monitor/bin/harvester/gather_tasks_test.cc b/src/developer/system_monitor/bin/harvester/gather_tasks_test.cc
index 2d58cd7..e3aae3b 100644
--- a/src/developer/system_monitor/bin/harvester/gather_tasks_test.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_tasks_test.cc
@@ -9,7 +9,7 @@
 #include <gtest/gtest.h>
 
 #include "dockyard_proxy_fake.h"
-#include "root_resource.h"
+#include "info_resource.h"
 
 class GatherTasksTest : public ::testing::Test {
  public:
@@ -35,10 +35,10 @@
 };
 
 TEST_F(GatherTasksTest, MemoryData) {
-  zx_handle_t root_resource;
-  ASSERT_EQ(harvester::GetRootResource(&root_resource), ZX_OK);
+  zx_handle_t info_resource;
+  ASSERT_EQ(harvester::GetInfoResource(&info_resource), ZX_OK);
   harvester::DockyardProxyFake dockyard_proxy;
-  harvester::GatherTasks gatherer(root_resource, &dockyard_proxy);
+  harvester::GatherTasks gatherer(info_resource, &dockyard_proxy);
   gatherer.Gather();
 
   std::string test_string;
diff --git a/src/developer/system_monitor/bin/harvester/gather_threads_and_cpu.cc b/src/developer/system_monitor/bin/harvester/gather_threads_and_cpu.cc
index f5a9f16..37a6f4d 100644
--- a/src/developer/system_monitor/bin/harvester/gather_threads_and_cpu.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_threads_and_cpu.cc
@@ -16,8 +16,8 @@
 namespace harvester {
 
 GatherThreadsAndCpu::GatherThreadsAndCpu(
-    zx_handle_t root_resource, harvester::DockyardProxy* dockyard_proxy)
-    : GatherCategory(root_resource, dockyard_proxy) {}
+    zx_handle_t info_resource, harvester::DockyardProxy* dockyard_proxy)
+    : GatherCategory(info_resource, dockyard_proxy) {}
 
 void GatherThreadsAndCpu::Gather() {
   SampleBundle samples;
@@ -30,7 +30,7 @@
     AddTaskBasics(&samples, task_tree->Threads(), dockyard::KoidType::THREAD);
   }
   AddThreadStats(&samples, task_tree->Threads());
-  AddGlobalCpuSamples(&samples, RootResource());
+  AddGlobalCpuSamples(&samples, InfoResource());
   samples.Upload(DockyardPtr());
   actions_.NextInterval();
 }
diff --git a/src/developer/system_monitor/bin/harvester/gather_threads_and_cpu.h b/src/developer/system_monitor/bin/harvester/gather_threads_and_cpu.h
index b2cf4d5..d777f8b 100644
--- a/src/developer/system_monitor/bin/harvester/gather_threads_and_cpu.h
+++ b/src/developer/system_monitor/bin/harvester/gather_threads_and_cpu.h
@@ -32,7 +32,7 @@
 // Gather samples for threads and global CPU stats.
 class GatherThreadsAndCpu : public GatherCategory {
  public:
-  GatherThreadsAndCpu(zx_handle_t root_resource,
+  GatherThreadsAndCpu(zx_handle_t info_resource,
                       harvester::DockyardProxy* dockyard_proxy);
 
   // GatherCategory.
diff --git a/src/developer/system_monitor/bin/harvester/gather_threads_and_cpu_test.cc b/src/developer/system_monitor/bin/harvester/gather_threads_and_cpu_test.cc
index 79e0a062..ee6f5e9 100644
--- a/src/developer/system_monitor/bin/harvester/gather_threads_and_cpu_test.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_threads_and_cpu_test.cc
@@ -10,7 +10,7 @@
 #include <gtest/gtest.h>
 
 #include "dockyard_proxy_fake.h"
-#include "root_resource.h"
+#include "info_resource.h"
 
 class GatherThreadsAndCpuTest : public ::testing::Test {
  public:
@@ -36,10 +36,10 @@
 };
 
 TEST_F(GatherThreadsAndCpuTest, Inspectable) {
-  zx_handle_t root_resource;
-  ASSERT_EQ(harvester::GetRootResource(&root_resource), ZX_OK);
+  zx_handle_t info_resource;
+  ASSERT_EQ(harvester::GetInfoResource(&info_resource), ZX_OK);
   harvester::DockyardProxyFake dockyard_proxy;
-  harvester::GatherThreadsAndCpu gatherer(root_resource, &dockyard_proxy);
+  harvester::GatherThreadsAndCpu gatherer(info_resource, &dockyard_proxy);
   gatherer.Gather();
 
   std::string test_string;
diff --git a/src/developer/system_monitor/bin/harvester/gather_vmos.h b/src/developer/system_monitor/bin/harvester/gather_vmos.h
index 24de789c..cb14d8a 100644
--- a/src/developer/system_monitor/bin/harvester/gather_vmos.h
+++ b/src/developer/system_monitor/bin/harvester/gather_vmos.h
@@ -48,11 +48,11 @@
     char name[ZX_MAX_NAME_LEN];
   };
 
-  GatherVmos(zx_handle_t root_resource,
+  GatherVmos(zx_handle_t info_resource,
              harvester::DockyardProxy* dockyard_proxy,
-             harvester::TaskTree& task_tree,
-             OS* os)
-      : GatherCategory(root_resource, dockyard_proxy), task_tree_(task_tree),
+             harvester::TaskTree& task_tree, OS* os)
+      : GatherCategory(info_resource, dockyard_proxy),
+        task_tree_(task_tree),
         os_(os) {}
 
   // GatherCategory.
@@ -155,4 +155,3 @@
 }  // namespace harvester
 
 #endif  // SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_GATHER_VMOS_H_
-
diff --git a/src/developer/system_monitor/bin/harvester/gather_vmos_test.cc b/src/developer/system_monitor/bin/harvester/gather_vmos_test.cc
index 6a38116..a20145e 100644
--- a/src/developer/system_monitor/bin/harvester/gather_vmos_test.cc
+++ b/src/developer/system_monitor/bin/harvester/gather_vmos_test.cc
@@ -4,14 +4,15 @@
 
 #include "gather_vmos.h"
 
+#include <zircon/process.h>
+
 #include <algorithm>
 
 #include <gmock/gmock.h>
 #include <gtest/gtest.h>
-#include <zircon/process.h>
 
 #include "dockyard_proxy_fake.h"
-#include "root_resource.h"
+#include "info_resource.h"
 
 using ::testing::_;
 using ::testing::IsNull;
@@ -40,7 +41,7 @@
 class GatherVmosTest : public ::testing::Test {
  public:
   void SetUp() override {
-    ASSERT_EQ(harvester::GetRootResource(&root_resource_), ZX_OK);
+    ASSERT_EQ(harvester::GetInfoResource(&info_resource_), ZX_OK);
   }
 
   zx_info_vmo_t MakeVmo(zx_koid_t vmo_koid, size_t size_bytes,
@@ -52,18 +53,18 @@
                                   size_t size_bytes, size_t committed_bytes,
                                   const char* name) {
     zx_info_vmo_t vmo = {
-      .koid = vmo_koid,
-      .size_bytes = size_bytes,
-      .parent_koid = parent_vmo_koid,
-      .committed_bytes = committed_bytes,
+        .koid = vmo_koid,
+        .size_bytes = size_bytes,
+        .parent_koid = parent_vmo_koid,
+        .committed_bytes = committed_bytes,
     };
     strlcpy(vmo.name, name, sizeof(vmo.name));
     return vmo;
   }
 
   zx_status_t GetVmoCount(zx_handle_t parent, int children_kind,
-                          void* out_buffer, size_t buffer_size,
-                          size_t* actual, size_t* avail) {
+                          void* out_buffer, size_t buffer_size, size_t* actual,
+                          size_t* avail) {
     if (process_handle_to_vmos_.count(parent) == 0) {
       ADD_FAILURE() << "Warning: unexpected handle " << parent;
       return ZX_ERR_BAD_HANDLE;
@@ -76,8 +77,8 @@
   }
 
   zx_status_t GetVmoInfo(zx_handle_t parent, int children_kind,
-                         void* out_buffer, size_t buffer_size,
-                         size_t* actual, size_t* avail) {
+                         void* out_buffer, size_t buffer_size, size_t* actual,
+                         size_t* avail) {
     size_t capacity = buffer_size / sizeof(zx_info_vmo_t);
 
     if (process_handle_to_vmos_.count(parent) == 0) {
@@ -88,7 +89,7 @@
 
     *actual = std::min(vmos.size(), capacity);
     *avail = vmos.size();
-    memcpy(out_buffer, (void*) vmos.data(), *actual * sizeof(zx_info_vmo_t));
+    memcpy(out_buffer, (void*)vmos.data(), *actual * sizeof(zx_info_vmo_t));
 
     return ZX_OK;
   }
@@ -115,12 +116,12 @@
   harvester::DockyardProxyFake dockyard_proxy_;
   std::vector<harvester::TaskTree::Task> processes_;
   std::map<zx_handle_t, std::vector<zx_info_vmo_t>> process_handle_to_vmos_;
-  zx_handle_t root_resource_;
+  zx_handle_t info_resource_;
 };
 
 TEST_F(GatherVmosTest, NoRootedVmos) {
-  harvester::GatherVmos gatherer(
-      root_resource_, &dockyard_proxy_, task_tree_, &os_);
+  harvester::GatherVmos gatherer(info_resource_, &dockyard_proxy_, task_tree_,
+                                 &os_);
 
   // Build a task tree of:
   //
@@ -132,12 +133,12 @@
   //
   // Where everything but 1 is a process.
   processes_ = {
-    // These tuples are {handle, koid, parent koid}.
-    // The top level parent 1 is hidden because it's not a process.
-    {2, 2, 1},
+      // These tuples are {handle, koid, parent koid}.
+      // The top level parent 1 is hidden because it's not a process.
+      {2, 2, 1},
       {3, 3, 2},
       {4, 4, 2},
-    {5, 5, 1},
+      {5, 5, 1},
   };
   ON_CALL(task_tree_, Processes()).WillByDefault(ReturnRef(processes_));
 
@@ -156,20 +157,18 @@
 
   uint64_t test_value;
   for (auto& process : processes_) {
-    EXPECT_TRUE(
-        dockyard_proxy_.CheckValueSent(
-            KoidPath(process.koid, "vmo_Sysmem-core"), &test_value));
+    EXPECT_TRUE(dockyard_proxy_.CheckValueSent(
+        KoidPath(process.koid, "vmo_Sysmem-core"), &test_value));
     EXPECT_EQ(test_value, 0UL);
-    EXPECT_TRUE(
-        dockyard_proxy_.CheckValueSent(
-            KoidPath(process.koid, "vmo_Sysmem-contig-core"), &test_value));
+    EXPECT_TRUE(dockyard_proxy_.CheckValueSent(
+        KoidPath(process.koid, "vmo_Sysmem-contig-core"), &test_value));
     EXPECT_EQ(test_value, 0UL);
   }
 }
 
 TEST_F(GatherVmosTest, RootedVmos_WithNestedDescendants) {
-  harvester::GatherVmos gatherer(
-      root_resource_, &dockyard_proxy_, task_tree_, &os_);
+  harvester::GatherVmos gatherer(info_resource_, &dockyard_proxy_, task_tree_,
+                                 &os_);
 
   // Build a task tree of:
   //
@@ -181,12 +180,12 @@
   //
   // Where everything but 1 is a process.
   processes_ = {
-    // These tuples are {handle, koid, parent koid}.
-    // The top level parent 1 is hidden because it's not a process.
-    {2, 2, 1},
+      // These tuples are {handle, koid, parent koid}.
+      // The top level parent 1 is hidden because it's not a process.
+      {2, 2, 1},
       {3, 3, 2},
       {4, 4, 2},
-    {5, 5, 1},
+      {5, 5, 1},
   };
   ON_CALL(task_tree_, Processes()).WillByDefault(ReturnRef(processes_));
 
@@ -197,16 +196,14 @@
   // <koid:2> will take one page to hand out later.
   zx_info_vmo_t intermediate_vmo =
       MakeVmoWithParent(102, root_vmo.koid, 4096, 4096, "Sysmem-core");
-  zx_info_vmo_t nonrooted_vmo =
-      MakeVmo(202, 4096 * 2, 4096 * 2, "scudo");
+  zx_info_vmo_t nonrooted_vmo = MakeVmo(202, 4096 * 2, 4096 * 2, "scudo");
   process_handle_to_vmos_[2] = {intermediate_vmo, nonrooted_vmo};
 
   // <koid:4> will take the page from <koid:2>. Even though the VMO has been
   // assigned a slightly different name, GatherVmos will still find it using the
   // parent/child relationship.
-  zx_info_vmo_t child_vmo =
-      MakeVmoWithParent(104, intermediate_vmo.koid, 4096, 4096,
-                        "Sysmem-core-child");
+  zx_info_vmo_t child_vmo = MakeVmoWithParent(104, intermediate_vmo.koid, 4096,
+                                              4096, "Sysmem-core-child");
   process_handle_to_vmos_[4] = {child_vmo};
 
   // <koid:3> gets a non-rooted page.
@@ -223,17 +220,14 @@
 
   // Check that <koid:1> has nothing sent since it's a job.
   uint64_t test_value;
-  EXPECT_FALSE(
-      dockyard_proxy_.CheckValueSent(
-          KoidPath(1, "vmo_Sysmem-core"), &test_value));
+  EXPECT_FALSE(dockyard_proxy_.CheckValueSent(KoidPath(1, "vmo_Sysmem-core"),
+                                              &test_value));
 
   // Check that scudo information is not sent (it is not rooted memory).
   EXPECT_FALSE(
-      dockyard_proxy_.CheckValueSent(
-          KoidPath(1, "vmo_scudo"), &test_value));
+      dockyard_proxy_.CheckValueSent(KoidPath(1, "vmo_scudo"), &test_value));
   EXPECT_FALSE(
-      dockyard_proxy_.CheckValueSent(
-          KoidPath(1, "vmo_scudo"), &test_value));
+      dockyard_proxy_.CheckValueSent(KoidPath(1, "vmo_scudo"), &test_value));
 
   // <koid:5> should retain 3 rooted pages, <koid:2> and <koid:3> none, and
   // <koid:4> one page.
@@ -244,25 +238,20 @@
 
   // No processes should have memory from a *different* sysmem VMO.
   for (auto& process : processes_) {
-    EXPECT_EQ(
-        GetValueForPath(KoidPath(process.koid, "vmo_Sysmem-contig-core")), 0UL);
+    EXPECT_EQ(GetValueForPath(KoidPath(process.koid, "vmo_Sysmem-contig-core")),
+              0UL);
   }
 }
 
 TEST_F(GatherVmosTest, TracksChangesOverTime) {
-  harvester::GatherVmos gatherer(
-      root_resource_, &dockyard_proxy_, task_tree_, &os_);
+  harvester::GatherVmos gatherer(info_resource_, &dockyard_proxy_, task_tree_,
+                                 &os_);
 
   // Build a list of processes all under root job 0.
   processes_ = {
-    // These tuples are {handle, koid, parent koid}.
-    // The top level parent 0 is hidden because it's not a process.
-    {1, 1, 0},
-    {2, 2, 0},
-    {3, 3, 0},
-    {4, 4, 0},
-    {5, 5, 0},
-    {6, 6, 0},
+      // These tuples are {handle, koid, parent koid}.
+      // The top level parent 0 is hidden because it's not a process.
+      {1, 1, 0}, {2, 2, 0}, {3, 3, 0}, {4, 4, 0}, {5, 5, 0}, {6, 6, 0},
   };
   ON_CALL(task_tree_, Processes()).WillByDefault(ReturnRef(processes_));
 
@@ -281,8 +270,7 @@
 
   // No processes should have sysmem VMOs.
   for (auto& process : processes_) {
-    EXPECT_EQ(
-        GetValueForPath(KoidPath(process.koid, "vmo_Sysmem-core")), 0UL);
+    EXPECT_EQ(GetValueForPath(KoidPath(process.koid, "vmo_Sysmem-core")), 0UL);
   }
 
   // Give <koid:2> a rooted VMO.
@@ -299,8 +287,7 @@
   // Only the first 3 processes should be checked; <koid:2>'s new sysmem VMO
   // should not be detected yet.
   for (auto& process : processes_) {
-    EXPECT_EQ(
-        GetValueForPath(KoidPath(process.koid, "vmo_Sysmem-core")), 0UL);
+    EXPECT_EQ(GetValueForPath(KoidPath(process.koid, "vmo_Sysmem-core")), 0UL);
   }
 
   // Add a new <koid:7> with a rooted VMO.
@@ -308,7 +295,6 @@
   zx_info_vmo_t contig_vmo = MakeVmo(107, 4096, 4096, "Sysmem-contig-core");
   process_handle_to_vmos_[7] = {contig_vmo};
 
-
   // Scan queue: [2, 3, 5, 6, 1, 7]. Gather will see <koid:2>'s VMO. New
   // processes are always scanned.
   gatherer.Gather();
@@ -321,4 +307,3 @@
   EXPECT_EQ(GetValueForPath(KoidPath(7, "vmo_Sysmem-core")), 0UL);
   EXPECT_EQ(GetValueForPath(KoidPath(7, "vmo_Sysmem-contig-core")), 4096UL);
 }
-
diff --git a/src/developer/system_monitor/bin/harvester/harvester.cc b/src/developer/system_monitor/bin/harvester/harvester.cc
index a76ba67..6734665 100644
--- a/src/developer/system_monitor/bin/harvester/harvester.cc
+++ b/src/developer/system_monitor/bin/harvester/harvester.cc
@@ -26,10 +26,10 @@
 
 namespace harvester {
 
-Harvester::Harvester(zx_handle_t root_resource,
+Harvester::Harvester(zx_handle_t info_resource,
                      std::unique_ptr<DockyardProxy> dockyard_proxy,
                      std::unique_ptr<OS> os)
-    : root_resource_(root_resource),
+    : info_resource_(info_resource),
       dockyard_proxy_(std::move(dockyard_proxy)),
       os_(std::move(os)),
       log_listener_(sys::ServiceDirectory::CreateFromNamespace()) {}
diff --git a/src/developer/system_monitor/bin/harvester/harvester.h b/src/developer/system_monitor/bin/harvester/harvester.h
index f157395..75ccd19 100644
--- a/src/developer/system_monitor/bin/harvester/harvester.h
+++ b/src/developer/system_monitor/bin/harvester/harvester.h
@@ -29,7 +29,7 @@
 // different types of Dockyard Samples as directed by the Harvester.
 class Harvester {
  public:
-  Harvester(zx_handle_t root_resource,
+  Harvester(zx_handle_t info_resource,
             std::unique_ptr<DockyardProxy> dockyard_proxy,
             std::unique_ptr<OS> os);
 
@@ -44,26 +44,26 @@
   void GatherSlowData(async_dispatcher_t* dispatcher);
 
  private:
-  zx_handle_t root_resource_;
+  zx_handle_t info_resource_;
   std::unique_ptr<harvester::DockyardProxy> dockyard_proxy_;
   std::unique_ptr<harvester::OS> os_;
   LogListener log_listener_;
 
-  GatherChannels gather_channels_{root_resource_, dockyard_proxy_.get()};
-  GatherCpu gather_cpu_{root_resource_, dockyard_proxy_.get()};
-  GatherDeviceInfo gather_device_info_{root_resource_, dockyard_proxy_.get()};
-  GatherInspectable gather_inspectable_{root_resource_, dockyard_proxy_.get()};
-  GatherIntrospection gather_introspection_{root_resource_,
+  GatherChannels gather_channels_{info_resource_, dockyard_proxy_.get()};
+  GatherCpu gather_cpu_{info_resource_, dockyard_proxy_.get()};
+  GatherDeviceInfo gather_device_info_{info_resource_, dockyard_proxy_.get()};
+  GatherInspectable gather_inspectable_{info_resource_, dockyard_proxy_.get()};
+  GatherIntrospection gather_introspection_{info_resource_,
                                             dockyard_proxy_.get()};
-  GatherMemory gather_memory_{root_resource_, dockyard_proxy_.get()};
-  GatherMemoryDigest gather_memory_digest_{root_resource_,
+  GatherMemory gather_memory_{info_resource_, dockyard_proxy_.get()};
+  GatherMemoryDigest gather_memory_digest_{info_resource_,
                                            dockyard_proxy_.get()};
-  GatherTasks gather_tasks_{root_resource_, dockyard_proxy_.get()};
-  GatherThreadsAndCpu gather_threads_and_cpu_{root_resource_,
+  GatherTasks gather_tasks_{info_resource_, dockyard_proxy_.get()};
+  GatherThreadsAndCpu gather_threads_and_cpu_{info_resource_,
                                               dockyard_proxy_.get()};
-  GatherProcessesAndMemory gather_processes_and_memory_{root_resource_,
+  GatherProcessesAndMemory gather_processes_and_memory_{info_resource_,
                                                         dockyard_proxy_.get()};
-  GatherVmos gather_vmos_{root_resource_, dockyard_proxy_.get(),
+  GatherVmos gather_vmos_{info_resource_, dockyard_proxy_.get(),
                           g_slow_data_task_tree, os_.get()};
 
   friend class ::SystemMonitorHarvesterTest;
diff --git a/src/developer/system_monitor/bin/harvester/harvester_fake.h b/src/developer/system_monitor/bin/harvester/harvester_fake.h
index 826ec0d..20fdbfb 100644
--- a/src/developer/system_monitor/bin/harvester/harvester_fake.h
+++ b/src/developer/system_monitor/bin/harvester/harvester_fake.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef GARNET_BIN_SYSTEM_MONITOR_HARVESTER_HARVESTER_FAKE_H_
-#define GARNET_BIN_SYSTEM_MONITOR_HARVESTER_HARVESTER_FAKE_H_
+#ifndef SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_HARVESTER_FAKE_H_
+#define SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_HARVESTER_FAKE_H_
 
 #include <lib/async/cpp/task.h>
 #include <lib/async/cpp/wait.h>
@@ -24,9 +24,9 @@
 
 class HarvesterFake : public Harvester {
  public:
-  HarvesterFake(zx_handle_t root_resource,
+  HarvesterFake(zx_handle_t info_resource,
                 std::unique_ptr<DockyardProxy> dockyard_proxy)
-      : Harvester(root_resource, /*dispatcher=*/nullptr,
+      : Harvester(info_resource, /*dispatcher=*/nullptr,
                   std::move(dockyard_proxy)) {}
 
   void GatherData() {}
@@ -37,4 +37,4 @@
 
 }  // namespace harvester
 
-#endif  // GARNET_BIN_SYSTEM_MONITOR_HARVESTER_HARVESTER_FAKE_H_
+#endif  // SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_HARVESTER_FAKE_H_
diff --git a/src/developer/system_monitor/bin/harvester/harvester_main.cc b/src/developer/system_monitor/bin/harvester/harvester_main.cc
index 22f38566..afbed6e 100644
--- a/src/developer/system_monitor/bin/harvester/harvester_main.cc
+++ b/src/developer/system_monitor/bin/harvester/harvester_main.cc
@@ -16,8 +16,8 @@
 #include "dockyard_proxy_grpc.h"
 #include "dockyard_proxy_local.h"
 #include "harvester.h"
+#include "info_resource.h"
 #include "os.h"
-#include "root_resource.h"
 #include "src/lib/fxl/command_line.h"
 #include "src/lib/fxl/log_settings_command_line.h"
 #include "src/lib/fxl/strings/string_number_conversions.h"
@@ -96,8 +96,8 @@
     dockyard_proxy = std::make_unique<harvester::DockyardProxyLocal>();
   }
 
-  zx_handle_t root_resource;
-  zx_status_t ret = harvester::GetRootResource(&root_resource);
+  zx_handle_t info_resource;
+  zx_status_t ret = harvester::GetInfoResource(&info_resource);
   if (ret != ZX_OK) {
     exit(EXIT_CODE_GENERAL_ERROR);
   }
@@ -124,7 +124,7 @@
     exit(EXIT_CODE_GENERAL_ERROR);
   }
   FX_LOGS(INFO) << "main thread " << pthread_self();
-  harvester::Harvester harvester(root_resource, std::move(dockyard_proxy),
+  harvester::Harvester harvester(info_resource, std::move(dockyard_proxy),
                                  std::move(os));
   harvester.GatherDeviceProperties();
   harvester.GatherFastData(fast_calls_loop.dispatcher());
diff --git a/src/developer/system_monitor/bin/harvester/harvester_test.cc b/src/developer/system_monitor/bin/harvester/harvester_test.cc
index 608a483..1d5acd73 100644
--- a/src/developer/system_monitor/bin/harvester/harvester_test.cc
+++ b/src/developer/system_monitor/bin/harvester/harvester_test.cc
@@ -13,8 +13,8 @@
 #include <gtest/gtest.h>
 
 #include "dockyard_proxy_fake.h"
+#include "info_resource.h"
 #include "os.h"
-#include "root_resource.h"
 
 namespace {
 
@@ -37,13 +37,13 @@
         std::make_unique<harvester::DockyardProxyFake>();
     std::unique_ptr<harvester::OS> os = std::make_unique<harvester::OSImpl>();
 
-    EXPECT_EQ(harvester::GetRootResource(&root_resource), ZX_OK);
+    EXPECT_EQ(harvester::GetInfoResource(&info_resource), ZX_OK);
     test_harvester = std::make_unique<harvester::Harvester>(
-        root_resource, std::move(dockyard_proxy), std::move(os));
+        info_resource, std::move(dockyard_proxy), std::move(os));
   }
 
-  zx_handle_t GetHarvesterRootResource() const {
-    return test_harvester->root_resource_;
+  zx_handle_t GetHarvesterInfoResource() const {
+    return test_harvester->info_resource_;
   }
   zx::duration GetGatherThreadsAndCpuPeriod() const {
     return test_harvester->gather_threads_and_cpu_.update_period_;
@@ -63,13 +63,13 @@
 
   std::unique_ptr<harvester::Harvester> test_harvester;
   async::Loop loop{&kAsyncLoopConfigNoAttachToCurrentThread};
-  zx_handle_t root_resource;
+  zx_handle_t info_resource;
 };
 
 TEST_F(SystemMonitorHarvesterTest, CreateHarvester) {
   AsyncDispatcherFake fast_dispatcher;
   AsyncDispatcherFake slow_dispatcher;
-  EXPECT_EQ(root_resource, GetHarvesterRootResource());
+  EXPECT_EQ(info_resource, GetHarvesterInfoResource());
 
   test_harvester->GatherFastData(&fast_dispatcher);
   EXPECT_EQ(zx::msec(100), GetGatherThreadsAndCpuPeriod());
@@ -91,13 +91,13 @@
     dockyard_proxy = dockyard_proxy_ptr.get();
     std::unique_ptr<harvester::OS> os = std::make_unique<harvester::OSImpl>();
 
-    EXPECT_EQ(harvester::GetRootResource(&root_resource), ZX_OK);
+    EXPECT_EQ(harvester::GetInfoResource(&info_resource), ZX_OK);
     test_harvester = std::make_unique<harvester::Harvester>(
-        root_resource, std::move(dockyard_proxy_ptr), std::move(os));
+        info_resource, std::move(dockyard_proxy_ptr), std::move(os));
   }
 
   std::unique_ptr<harvester::Harvester> test_harvester;
-  zx_handle_t root_resource;
+  zx_handle_t info_resource;
   harvester::DockyardProxyFake* dockyard_proxy;
 };
 
diff --git a/src/developer/system_monitor/bin/harvester/info_resource.cc b/src/developer/system_monitor/bin/harvester/info_resource.cc
new file mode 100644
index 0000000..a39436e
--- /dev/null
+++ b/src/developer/system_monitor/bin/harvester/info_resource.cc
@@ -0,0 +1,45 @@
+// Copyright 2019 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "info_resource.h"
+
+#include <fcntl.h>
+#include <fuchsia/kernel/cpp/fidl.h>
+#include <lib/fdio/directory.h>
+#include <lib/fdio/fdio.h>
+#include <lib/syslog/cpp/macros.h>
+#include <lib/zx/channel.h>
+
+namespace harvester {
+
+zx_status_t GetInfoResource(zx_handle_t* info_resource_handle) {
+  zx::channel local, remote;
+  zx_status_t status = zx::channel::create(0, &local, &remote);
+  if (status != ZX_OK) {
+    FX_LOGS(ERROR) << "Cannot create a channel.";
+    return status;
+  }
+
+  static const std::string kInfoResourcePath =
+      "/svc/" + std::string(fuchsia::kernel::InfoResource::Name_);
+  status = fdio_service_connect(kInfoResourcePath.c_str(), remote.release());
+  if (status != ZX_OK) {
+    FX_LOGS(ERROR) << "Cannot open fuchsia.kernel.InfoResource: "
+                   << zx_status_get_string(status);
+    return ZX_ERR_NOT_FOUND;
+  }
+
+  fuchsia::kernel::InfoResource_SyncProxy proxy(std::move(local));
+  zx::resource info_resource;
+  status = proxy.Get(&info_resource);
+  if (status != ZX_OK) {
+    FX_LOGS(ERROR) << "FIDL issue while trying to get info resource: "
+                   << zx_status_get_string(status);
+    return status;
+  }
+  *info_resource_handle = info_resource.release();
+  return ZX_OK;
+}
+
+}  // namespace harvester
diff --git a/src/developer/system_monitor/bin/harvester/info_resource.h b/src/developer/system_monitor/bin/harvester/info_resource.h
new file mode 100644
index 0000000..ceccdc04
--- /dev/null
+++ b/src/developer/system_monitor/bin/harvester/info_resource.h
@@ -0,0 +1,19 @@
+// Copyright 2019 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_INFO_RESOURCE_H_
+#define SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_INFO_RESOURCE_H_
+
+#include <zircon/status.h>
+#include <zircon/types.h>
+
+namespace harvester {
+
+// Get a handle to the info resource, which can be used to find its children
+// and walk the tree of resources below it.
+zx_status_t GetInfoResource(zx_handle_t* info_resource_handle);
+
+}  // namespace harvester
+
+#endif  // SRC_DEVELOPER_SYSTEM_MONITOR_BIN_HARVESTER_INFO_RESOURCE_H_
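For reference, a minimal sketch of how a caller uses the new API (illustrative only, not part of this change); it assumes the calling component requests fuchsia.kernel.InfoResource in its sandbox, as the .cmx edits later in this patch do:

#include "info_resource.h"

#include <lib/syslog/cpp/macros.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>

// Hypothetical caller, mirroring the gatherer test usage above.
zx_status_t ExampleUseInfoResource() {
  zx_handle_t info_resource = ZX_HANDLE_INVALID;
  zx_status_t status = harvester::GetInfoResource(&info_resource);
  if (status != ZX_OK) {
    FX_LOGS(ERROR) << "GetInfoResource failed: "
                   << zx_status_get_string(status);
    return status;
  }
  // The handle supports ZX_INFO_* queries, e.g. the kernel memory stats read
  // by GatherMemory earlier in this patch.
  zx_info_kmem_stats_t stats;
  return zx_object_get_info(info_resource, ZX_INFO_KMEM_STATS, &stats,
                            sizeof(stats), /*actual=*/nullptr,
                            /*avail=*/nullptr);
}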
diff --git a/src/developer/system_monitor/bin/harvester/root_resource_test.cc b/src/developer/system_monitor/bin/harvester/info_resource_test.cc
similarity index 73%
rename from src/developer/system_monitor/bin/harvester/root_resource_test.cc
rename to src/developer/system_monitor/bin/harvester/info_resource_test.cc
index 4afed9c..c43b24c 100644
--- a/src/developer/system_monitor/bin/harvester/root_resource_test.cc
+++ b/src/developer/system_monitor/bin/harvester/info_resource_test.cc
@@ -2,24 +2,24 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "root_resource.h"
+#include "info_resource.h"
 
 #include <lib/zx/time.h>
 
 #include <gtest/gtest.h>
 
-class SystemMonitorRootResourceTest : public ::testing::Test {};
+class SystemMonitorInfoResourceTest : public ::testing::Test {};
 
-TEST_F(SystemMonitorRootResourceTest, GatherData) {
-  zx_handle_t root_resource;
-  zx_status_t status = harvester::GetRootResource(&root_resource);
+TEST_F(SystemMonitorInfoResourceTest, GatherData) {
+  zx_handle_t info_resource;
+  zx_status_t status = harvester::GetInfoResource(&info_resource);
   EXPECT_EQ(status, ZX_OK);
-  EXPECT_NE(root_resource, ZX_HANDLE_INVALID);
+  EXPECT_NE(info_resource, ZX_HANDLE_INVALID);
 
   // Arbitrary choice of system calls to try out the handle.
   zx_info_cpu_stats_t stats;
   size_t actual, avail;
-  status = zx_object_get_info(root_resource, ZX_INFO_CPU_STATS, &stats,
+  status = zx_object_get_info(info_resource, ZX_INFO_CPU_STATS, &stats,
                               sizeof(stats), &actual, &avail);
   EXPECT_EQ(status, ZX_OK);
   // This test is not about this data, so only a few sanity checks are
diff --git a/src/developer/system_monitor/bin/harvester/meta/system_monitor_harvester.cmx b/src/developer/system_monitor/bin/harvester/meta/system_monitor_harvester.cmx
index 63a18fe..dec768b 100644
--- a/src/developer/system_monitor/bin/harvester/meta/system_monitor_harvester.cmx
+++ b/src/developer/system_monitor/bin/harvester/meta/system_monitor_harvester.cmx
@@ -11,8 +11,8 @@
             "hub"
         ],
         "services": [
-            "fuchsia.boot.RootResource",
             "fuchsia.diagnostics.ArchiveAccessor",
+            "fuchsia.kernel.InfoResource",
             "fuchsia.kernel.RootJob",
             "fuchsia.kernel.RootJobForInspect",
             "fuchsia.kernel.Stats",
diff --git a/src/developer/system_monitor/bin/harvester/meta/system_monitor_harvester_test.cmx b/src/developer/system_monitor/bin/harvester/meta/system_monitor_harvester_test.cmx
index ff6df26..6ec428c 100644
--- a/src/developer/system_monitor/bin/harvester/meta/system_monitor_harvester_test.cmx
+++ b/src/developer/system_monitor/bin/harvester/meta/system_monitor_harvester_test.cmx
@@ -2,7 +2,7 @@
     "facets": {
         "fuchsia.test": {
             "system-services": [
-                "fuchsia.boot.RootResource",
+                "fuchsia.kernel.InfoResource",
                 "fuchsia.kernel.RootJob",
                 "fuchsia.kernel.RootJobForInspect",
                 "fuchsia.kernel.Stats",
@@ -22,14 +22,14 @@
             "hub"
         ],
         "services": [
-            "fuchsia.boot.RootResource",
             "fuchsia.diagnostics.ArchiveAccessor",
+            "fuchsia.kernel.InfoResource",
             "fuchsia.kernel.RootJob",
             "fuchsia.kernel.RootJobForInspect",
             "fuchsia.kernel.Stats",
-            "fuchsia.sysinfo.SysInfo",
             "fuchsia.logger.LogSink",
-            "fuchsia.sys.Environment"
+            "fuchsia.sys.Environment",
+            "fuchsia.sysinfo.SysInfo"
         ]
     }
 }
diff --git a/src/developer/system_monitor/bin/harvester/root_resource.cc b/src/developer/system_monitor/bin/harvester/root_resource.cc
deleted file mode 100644
index 497b2db..0000000
--- a/src/developer/system_monitor/bin/harvester/root_resource.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2019 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "root_resource.h"
-
-#include <fcntl.h>
-#include <fuchsia/boot/c/fidl.h>
-#include <lib/fdio/directory.h>
-#include <lib/fdio/fdio.h>
-#include <lib/syslog/cpp/macros.h>
-#include <lib/zx/channel.h>
-
-namespace harvester {
-
-zx_status_t GetRootResource(zx_handle_t* root_resource) {
-  zx::channel local, remote;
-  zx_status_t status = zx::channel::create(0, &local, &remote);
-  if (status != ZX_OK) {
-    FX_LOGS(ERROR) << "Cannot create a channel.";
-    return status;
-  }
-  const char* root_resource_svc = "/svc/fuchsia.boot.RootResource";
-  status = fdio_service_connect(root_resource_svc, remote.release());
-  if (status != ZX_OK) {
-    FX_LOGS(ERROR) << "Cannot open fuchsia.boot.RootResource."
-                   << zx_status_get_string(status);
-    return ZX_ERR_NOT_FOUND;
-  }
-
-  zx_status_t fidl_status =
-      fuchsia_boot_RootResourceGet(local.get(), root_resource);
-  if (fidl_status != ZX_OK) {
-    FX_LOGS(ERROR) << "FIDL issue while trying to get root resource: "
-                   << zx_status_get_string(fidl_status);
-    return fidl_status;
-  }
-  return ZX_OK;
-}
-
-}  // namespace harvester
diff --git a/src/developer/system_monitor/bin/harvester/root_resource.h b/src/developer/system_monitor/bin/harvester/root_resource.h
deleted file mode 100644
index dc83bed..0000000
--- a/src/developer/system_monitor/bin/harvester/root_resource.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2019 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GARNET_BIN_SYSTEM_MONITOR_HARVESTER_ROOT_RESOURCE_H_
-#define GARNET_BIN_SYSTEM_MONITOR_HARVESTER_ROOT_RESOURCE_H_
-
-#include <zircon/status.h>
-#include <zircon/types.h>
-
-namespace harvester {
-
-// Get a handle to the root resource, which can be used to find its children
-// and so on to review a tree of resources.
-zx_status_t GetRootResource(zx_handle_t* root_resource);
-
-}  // namespace harvester
-
-#endif  // GARNET_BIN_SYSTEM_MONITOR_HARVESTER_ROOT_RESOURCE_H_
diff --git a/src/developer/system_monitor/bin/harvester/task_tree_test.cc b/src/developer/system_monitor/bin/harvester/task_tree_test.cc
index 4a1038c..ea197f5 100644
--- a/src/developer/system_monitor/bin/harvester/task_tree_test.cc
+++ b/src/developer/system_monitor/bin/harvester/task_tree_test.cc
@@ -9,7 +9,7 @@
 
 #include <gtest/gtest.h>
 
-#include "root_resource.h"
+#include "info_resource.h"
 
 class TaskTreeForTesting : public ::harvester::TaskTree {
  public:
diff --git a/src/devices/bin/driver_host/inspect_test.cc b/src/devices/bin/driver_host/inspect_test.cc
index 5171d23..60d66f6 100644
--- a/src/devices/bin/driver_host/inspect_test.cc
+++ b/src/devices/bin/driver_host/inspect_test.cc
@@ -31,7 +31,7 @@
   uint8_t buffer[4096];
   size_t length;
   {
-    fs::vdircookie_t cookie = {};
+    fs::VdirCookie cookie;
     EXPECT_EQ(inspect().diagnostics_dir().Readdir(&cookie, buffer, sizeof(buffer), &length), ZX_OK);
     fs::DirentChecker dc(buffer, length);
     dc.ExpectEntry(".", V_TYPE_DIR);
diff --git a/src/devices/bin/driver_manager/inspect_test.cc b/src/devices/bin/driver_manager/inspect_test.cc
index 9587401..0b8920b 100644
--- a/src/devices/bin/driver_manager/inspect_test.cc
+++ b/src/devices/bin/driver_manager/inspect_test.cc
@@ -29,9 +29,10 @@
   uint8_t buffer[4096];
   size_t length;
   {
-    fs::vdircookie_t cookie = {};
+    fs::VdirCookie cookie;
     EXPECT_EQ(inspect_manager().diagnostics_dir().Readdir(&cookie, buffer, sizeof(buffer), &length),
               ZX_OK);
+
     fs::DirentChecker dc(buffer, length);
     dc.ExpectEntry(".", V_TYPE_DIR);
     dc.ExpectEntry("driver_manager", V_TYPE_DIR);
@@ -43,8 +44,10 @@
   {
     fbl::RefPtr<fs::Vnode> node;
     inspect_manager().diagnostics_dir().Lookup("driver_manager", &node);
-    fs::vdircookie_t cookie = {};
+
+    fs::VdirCookie cookie;
     EXPECT_EQ(node->Readdir(&cookie, buffer, sizeof(buffer), &length), ZX_OK);
+
     fs::DirentChecker dc(buffer, length);
     dc.ExpectEntry(".", V_TYPE_DIR);
     dc.ExpectEntry("driver_host", V_TYPE_DIR);
@@ -182,8 +185,10 @@
     ASSERT_NE(dir, nullptr);
     ASSERT_NE(seqcount, nullptr);
     ASSERT_EQ(*seqcount, 1);
-    fs::vdircookie_t cookie = {};
+
+    fs::VdirCookie cookie;
     EXPECT_EQ(dir->Readdir(&cookie, buffer, sizeof(buffer), &length), ZX_OK);
+
     fs::DirentChecker dc(buffer, length);
     dc.ExpectEntry(".", V_TYPE_DIR);
     dc.ExpectEntry("000.inspect", V_TYPE_FILE);
@@ -223,8 +228,10 @@
     ASSERT_NE(dir, nullptr);
     ASSERT_NE(seqcount, nullptr);
     ASSERT_EQ(*seqcount, 1);
-    fs::vdircookie_t cookie = {};
+
+    fs::VdirCookie cookie;
     EXPECT_EQ(dir->Readdir(&cookie, buffer, sizeof(buffer), &length), ZX_OK);
+
     fs::DirentChecker dc(buffer, length);
     dc.ExpectEntry(".", V_TYPE_DIR);
     dc.ExpectEntry("000.inspect", V_TYPE_FILE);
diff --git a/src/devices/usb/drivers/usb-peripheral/BUILD.gn b/src/devices/usb/drivers/usb-peripheral/BUILD.gn
index 504df90..896d95c 100644
--- a/src/devices/usb/drivers/usb-peripheral/BUILD.gn
+++ b/src/devices/usb/drivers/usb-peripheral/BUILD.gn
@@ -2,10 +2,18 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import("//build/bind/bind.gni")
 import("//build/config/fuchsia/rules.gni")
 import("//build/test.gni")
 import("//build/test/test_package.gni")
 
+bind_rules("usb_peripheral_bind") {
+  rules = "usb_peripheral.bind"
+  output = "usb_peripheral-bind.h"
+  tests = "tests.json"
+  deps = [ "//src/devices/bind/fuchsia.usb" ]
+}
+
 driver_module("usb-peripheral") {
   configs += [
     "//build/config:all_source",
@@ -17,6 +25,7 @@
     "usb-peripheral.cc",
   ]
   deps = [
+    ":usb_peripheral_bind",
     "//sdk/banjo/ddk.protocol.usb",
     "//sdk/banjo/ddk.protocol.usb.dci",
     "//sdk/banjo/ddk.protocol.usb.function",
@@ -27,9 +36,6 @@
     "//src/devices/usb/lib/usb",
     "//src/lib/ddk",
     "//src/lib/ddk:ddk-metadata",
-
-    # TODO(fxb/38132): Migrate to the new bind rules and delete the below
-    "//src/lib/ddk:ddk-deprecated-binding-headers",
     "//src/lib/ddktl",
     "//zircon/public/lib/fbl",
     "//zircon/public/lib/fidl",
@@ -54,6 +60,7 @@
     "usb-peripheral.cc",
   ]
   deps = [
+    ":usb_peripheral_bind",
     "//sdk/banjo/ddk.protocol.composite",
     "//sdk/banjo/ddk.protocol.platform.device",
     "//sdk/banjo/ddk.protocol.usb",
@@ -71,10 +78,10 @@
     "//src/devices/testing/fake_ddk",
     "//src/devices/usb/lib/usb",
     "//src/lib/ddk",
-    "//src/lib/ddk:ddk-metadata",
 
     # TODO(fxb/38132): Migrate to the new bind rules and delete the below
     "//src/lib/ddk:ddk-deprecated-binding-headers",
+    "//src/lib/ddk:ddk-metadata",
     "//src/lib/ddktl",
     "//zircon/public/lib/fbl",
     "//zircon/public/lib/hwreg",
@@ -99,5 +106,8 @@
 
 group("tests") {
   testonly = true
-  deps = [ ":usb-peripheral-unittest-package" ]
+  deps = [
+    ":usb-peripheral-unittest-package",
+    ":usb_peripheral_bind_test",
+  ]
 }
diff --git a/src/devices/usb/drivers/usb-peripheral/tests.json b/src/devices/usb/drivers/usb-peripheral/tests.json
new file mode 100644
index 0000000..45957c4
--- /dev/null
+++ b/src/devices/usb/drivers/usb-peripheral/tests.json
@@ -0,0 +1,16 @@
+[
+    {
+        "name": "Invalid Protocol",
+        "expected": "abort",
+        "device": {
+            "fuchsia.BIND_PROTOCOL": "fuchsia.bluetooth.BIND_PROTOCOL.DEVICE"
+        }
+    },
+    {
+        "name": "usb peripheral",
+        "expected": "match",
+        "device": {
+            "fuchsia.BIND_PROTOCOL": "fuchsia.usb.BIND_PROTOCOL.DCI"
+        }
+    }
+]
\ No newline at end of file
diff --git a/src/devices/usb/drivers/usb-peripheral/usb-peripheral.cc b/src/devices/usb/drivers/usb-peripheral/usb-peripheral.cc
index 6ff8a01..fb633155 100644
--- a/src/devices/usb/drivers/usb-peripheral/usb-peripheral.cc
+++ b/src/devices/usb/drivers/usb-peripheral/usb-peripheral.cc
@@ -16,7 +16,6 @@
 #include <zircon/hw/usb/cdc.h>
 #include <zircon/listnode.h>
 
-#include <ddk/binding.h>
 #include <ddk/debug.h>
 #include <ddk/device.h>
 #include <ddk/driver.h>
@@ -31,6 +30,7 @@
 #include <fbl/auto_lock.h>
 #include <fbl/ref_ptr.h>
 
+#include "src/devices/usb/drivers/usb-peripheral/usb_peripheral-bind.h"
 #include "usb-function.h"
 
 namespace peripheral = ::llcpp::fuchsia::hardware::usb::peripheral;
@@ -1033,7 +1033,4 @@
 
 }  // namespace usb_peripheral
 
-// clang-format off
-ZIRCON_DRIVER_BEGIN(usb_device, usb_peripheral::ops, "zircon", "0.1", 1)
-    BI_MATCH_IF(EQ, BIND_PROTOCOL, ZX_PROTOCOL_USB_DCI),
-ZIRCON_DRIVER_END(usb_device)
+ZIRCON_DRIVER(usb_device, usb_peripheral::ops, "zircon", "0.1");
diff --git a/src/devices/usb/drivers/usb-peripheral/usb_peripheral.bind b/src/devices/usb/drivers/usb-peripheral/usb_peripheral.bind
new file mode 100644
index 0000000..485a1df
--- /dev/null
+++ b/src/devices/usb/drivers/usb-peripheral/usb_peripheral.bind
@@ -0,0 +1,7 @@
+// Copyright 2020 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+using fuchsia.usb;
+
+fuchsia.BIND_PROTOCOL == fuchsia.usb.BIND_PROTOCOL.DCI;
diff --git a/src/fidl/OWNERS b/src/fidl/OWNERS
index 3aa2be4f..7b06bb94f 100644
--- a/src/fidl/OWNERS
+++ b/src/fidl/OWNERS
@@ -1,3 +1,4 @@
+azaslavsky@google.com
 bprosnitz@google.com
 fcz@google.com
 ianloic@google.com
diff --git a/src/firmware/gigaboot/LICENSE b/src/firmware/gigaboot/LICENSE
index 294047c..47a55df 100644
--- a/src/firmware/gigaboot/LICENSE
+++ b/src/firmware/gigaboot/LICENSE
@@ -10,9 +10,6 @@
      copyright notice, this list of conditions and the following
      disclaimer in the documentation and/or other materials provided
      with the distribution.
-   * Neither the name of Google Inc. nor the names of its
-     contributors may be used to endorse or promote products derived
-     from this software without specific prior written permission.
 
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
diff --git a/src/lib/timekeeper/clock.h b/src/lib/timekeeper/clock.h
index 94a8c5d..b0f4207 100644
--- a/src/lib/timekeeper/clock.h
+++ b/src/lib/timekeeper/clock.h
@@ -29,12 +29,22 @@
     return status;
   }
 
+  // Returns the current UTC time.
+  zx_status_t UtcNow(zx::time_utc* result) const {
+    zx_time_t time;
+    zx_status_t status = GetUtcTime(&time);
+    *result = zx::time_utc(time);
+    return status;
+  }
+
   // Returns the current monotonic time. See |zx_clock_get_monotonic|.
   zx::time Now() const { return zx::time(GetMonotonicTime()); }
 
  protected:
   // Returns the current time for |kClockId|. See |zx_clock_get|.
   virtual zx_status_t GetTime(zx_clock_t clock_id, zx_time_t* time) const = 0;
+  // Returns the current UTC time.
+  virtual zx_status_t GetUtcTime(zx_time_t* time) const = 0;
   // Returns the current monotonic time. See |zx_clock_get_monotonic|.
   virtual zx_time_t GetMonotonicTime() const = 0;
 };
diff --git a/src/lib/timekeeper/monotonic_test_clock_base.cc b/src/lib/timekeeper/monotonic_test_clock_base.cc
index 1945da10..514b5cf 100644
--- a/src/lib/timekeeper/monotonic_test_clock_base.cc
+++ b/src/lib/timekeeper/monotonic_test_clock_base.cc
@@ -27,6 +27,11 @@
   return ZX_OK;
 }
 
+zx_status_t MonotonicTestClockBase::GetUtcTime(zx_time_t* time) const {
+  *time = GetClockStartingValue(ZX_CLOCK_UTC) + GetMonotonicTime();
+  return ZX_OK;
+}
+
 zx_time_t MonotonicTestClockBase::GetMonotonicTime() const {
   zx_time_t result = std::max(clock_(), last_returned_value_ + 1);
   last_returned_value_ = result;
diff --git a/src/lib/timekeeper/monotonic_test_clock_base.h b/src/lib/timekeeper/monotonic_test_clock_base.h
index aba8cc9..083f701 100644
--- a/src/lib/timekeeper/monotonic_test_clock_base.h
+++ b/src/lib/timekeeper/monotonic_test_clock_base.h
@@ -21,6 +21,7 @@
 
  private:
   zx_status_t GetTime(zx_clock_t clock_id, zx_time_t* time) const final;
+  zx_status_t GetUtcTime(zx_time_t* time) const final;
   zx_time_t GetMonotonicTime() const final;
 
   fit::function<zx_time_t()> clock_;
diff --git a/src/lib/timekeeper/system_clock.h b/src/lib/timekeeper/system_clock.h
index acf4a32..2c95371 100644
--- a/src/lib/timekeeper/system_clock.h
+++ b/src/lib/timekeeper/system_clock.h
@@ -6,17 +6,23 @@
 #define SRC_LIB_TIMEKEEPER_SYSTEM_CLOCK_H_
 
 #include <lib/timekeeper/clock.h>
+#include <lib/zx/clock.h>
 #include <lib/zx/time.h>
+#include <zircon/utc.h>
 
 namespace timekeeper {
 
-// Implementation of |Clock| using the clock related syscalls.
+// Implementation of |Clock| using the clock-related syscalls and the UTC clock
+// passed to the process on launch.
 class SystemClock : public Clock {
  private:
   zx_status_t GetTime(zx_clock_t clock_id, zx_time_t* time) const override {
     return zx_clock_get(clock_id, time);
   }
+  zx_status_t GetUtcTime(zx_time_t* time) const override { return utc_clock_->read(time); }
   zx_time_t GetMonotonicTime() const override { return zx_clock_get_monotonic(); }
+
+  zx::unowned_clock utc_clock_ = zx::unowned_clock(zx_utc_reference_get());
 };
 
 }  // namespace timekeeper
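The new GetUtcTime path reads the UTC clock handle that the Fuchsia runtime hands to each process via zx_utc_reference_get(). A minimal standalone sketch of the same read, assuming the process was launched with a UTC clock (the helper name is hypothetical):

#include <lib/zx/clock.h>
#include <zircon/utc.h>

// Reads UTC directly from the process-wide clock handle. The handle returned
// by zx_utc_reference_get() is borrowed, so it is wrapped unowned and never
// closed here; read() fails if no UTC clock was installed for the process.
zx_status_t ReadUtcDirectly(zx_time_t* out_time) {
  zx::unowned_clock utc_clock(zx_utc_reference_get());
  return utc_clock->read(out_time);
}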
diff --git a/src/lib/timekeeper/system_clock_unittest.cc b/src/lib/timekeeper/system_clock_unittest.cc
index 5b88f6b..4305a3a0 100644
--- a/src/lib/timekeeper/system_clock_unittest.cc
+++ b/src/lib/timekeeper/system_clock_unittest.cc
@@ -38,5 +38,14 @@
   EXPECT_GE(time2, time1);
 }
 
+TEST(SystemClockTest, UtcNow) {
+  SystemClock clock;
+
+  zx::time_utc time1;
+  ASSERT_EQ(ZX_OK, clock.UtcNow(&time1));
+
+  EXPECT_GT(time1, zx::time_utc(0));
+}
+
 }  // namespace
 }  // namespace timekeeper
diff --git a/src/lib/timekeeper/test_clock.cc b/src/lib/timekeeper/test_clock.cc
index e8adedb..cc0ecf0 100644
--- a/src/lib/timekeeper/test_clock.cc
+++ b/src/lib/timekeeper/test_clock.cc
@@ -15,6 +15,11 @@
   return ZX_OK;
 }
 
+zx_status_t TestClock::GetUtcTime(zx_time_t* time) const {
+  *time = current_time_;
+  return ZX_OK;
+}
+
 zx_time_t TestClock::GetMonotonicTime() const { return current_time_; }
 
 }  // namespace timekeeper
diff --git a/src/lib/timekeeper/test_clock.h b/src/lib/timekeeper/test_clock.h
index 91bec38..b1b79d2 100644
--- a/src/lib/timekeeper/test_clock.h
+++ b/src/lib/timekeeper/test_clock.h
@@ -22,6 +22,7 @@
 
  private:
   zx_status_t GetTime(zx_clock_t clock_id, zx_time_t* time) const override;
+  zx_status_t GetUtcTime(zx_time_t* time) const override;
   zx_time_t GetMonotonicTime() const override;
 
   zx_time_t current_time_;
diff --git a/src/lib/timekeeper/test_clock_unittest.cc b/src/lib/timekeeper/test_clock_unittest.cc
index f8a3794..0fa842a 100644
--- a/src/lib/timekeeper/test_clock_unittest.cc
+++ b/src/lib/timekeeper/test_clock_unittest.cc
@@ -25,6 +25,12 @@
   EXPECT_EQ(ZX_OK, clock.Now(&t3));
 
   EXPECT_EQ(t1.get(), t3.get());
+
+  zx::time_utc t4;
+
+  EXPECT_EQ(ZX_OK, clock.UtcNow(&t4));
+
+  EXPECT_EQ(t3, t4);
 }
 
 }  // namespace
diff --git a/src/security/policy/info_resource_allowlist_eng.txt b/src/security/policy/info_resource_allowlist_eng.txt
index 82324ad..37ceb8a 100644
--- a/src/security/policy/info_resource_allowlist_eng.txt
+++ b/src/security/policy/info_resource_allowlist_eng.txt
@@ -1,4 +1,8 @@
+# Components
+fuchsia-pkg://fuchsia.com/system_monitor_harvester#meta/system_monitor_harvester.cmx
+# Component Tests
 fuchsia-pkg://fuchsia.com/component-manager-tests#meta/component_manager_boot_env_tests.cmx
 fuchsia-pkg://fuchsia.com/loadbench#meta/loadbench.cmx
 fuchsia-pkg://fuchsia.com/loadbench_tests#meta/loadbench_unittests.cmx
 fuchsia-pkg://fuchsia.com/policy-integration-tests#meta/info_resource_allowed.cmx
+fuchsia-pkg://fuchsia.com/system_monitor_harvester_tests#meta/system_monitor_harvester_test.cmx
diff --git a/src/security/policy/root_resource_allowlist_eng.txt b/src/security/policy/root_resource_allowlist_eng.txt
index b89d3d0..b25e418 100644
--- a/src/security/policy/root_resource_allowlist_eng.txt
+++ b/src/security/policy/root_resource_allowlist_eng.txt
@@ -1,8 +1,6 @@
 # Components
-fuchsia-pkg://fuchsia.com/system_monitor_harvester#meta/system_monitor_harvester.cmx
 fuchsia-pkg://fuchsia.com/thermd#meta/thermd.cmx
 # Component Tests
 fuchsia-pkg://fuchsia.com/component-manager-tests#meta/component_manager_boot_env_tests.cmx
 fuchsia-pkg://fuchsia.com/platform-bus-bti-test#meta/platform-bus-bti-test.cmx
 fuchsia-pkg://fuchsia.com/policy-integration-tests#meta/root_resource_allowed.cmx
-fuchsia-pkg://fuchsia.com/system_monitor_harvester_tests#meta/system_monitor_harvester_test.cmx
diff --git a/src/storage/bin/fvm/main.cc b/src/storage/bin/fvm/main.cc
index 15190cd..04432fb 100644
--- a/src/storage/bin/fvm/main.cc
+++ b/src/storage/bin/fvm/main.cc
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <lib/fit/defer.h>
 #include <unistd.h>
 
 #include <cstdint>
@@ -598,6 +599,7 @@
       }
       std::string tmp_path = std::filesystem::temp_directory_path().generic_string() +
                              "/decompressed_sparse_fvm_XXXXXX";
+
       fbl::unique_fd created_file(mkstemp(tmp_path.data()));
 
       if (!created_file.is_valid()) {
@@ -606,6 +608,12 @@
         return -1;
       }
 
+      auto cleanup_temp = fit::defer([tmp_path]() {
+        if (unlink(tmp_path.c_str()) < 0) {
+          fprintf(stderr, "Failed to delete temp file '%s': %s\n", tmp_path.c_str(),
+                  strerror(errno));
+        }
+      });
       if (compressedContainer->Decompress(tmp_path.c_str()) != ZX_OK) {
         return -1;
       }
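The cleanup added above relies on fit::defer, which runs its callable when the returned guard object goes out of scope, so the temporary file is unlinked on every exit path, including the early returns that follow. A minimal standalone sketch of the pattern (illustrative only):

#include <lib/fit/defer.h>

#include <cstdio>

int main() {
  // The deferred action runs when `cleanup` is destroyed, on every return
  // path out of this scope, mirroring cleanup_temp above.
  auto cleanup = fit::defer([] { std::puts("cleanup runs on scope exit"); });
  std::puts("work happens first");
  return 0;
}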
diff --git a/src/storage/blobfs/blobfs.cc b/src/storage/blobfs/blobfs.cc
index 3224b9c..bf6a631 100644
--- a/src/storage/blobfs/blobfs.cc
+++ b/src/storage/blobfs/blobfs.cc
@@ -561,11 +561,10 @@
   return fs_id_.duplicate(ZX_RIGHTS_BASIC, out_fs_id);
 }
 
-static_assert(sizeof(DirectoryCookie) <= sizeof(fs::vdircookie_t),
+static_assert(sizeof(DirectoryCookie) <= sizeof(fs::VdirCookie),
               "Blobfs dircookie too large to fit in IO state");
 
-zx_status_t Blobfs::Readdir(fs::vdircookie_t* cookie, void* dirents, size_t len,
-                            size_t* out_actual) {
+zx_status_t Blobfs::Readdir(fs::VdirCookie* cookie, void* dirents, size_t len, size_t* out_actual) {
   TRACE_DURATION("blobfs", "Blobfs::Readdir", "len", len);
   fs::DirentFiller df(dirents, len);
   DirectoryCookie* c = reinterpret_cast<DirectoryCookie*>(cookie);
diff --git a/src/storage/blobfs/blobfs.h b/src/storage/blobfs/blobfs.h
index 8b9ec05..655a5e6 100644
--- a/src/storage/blobfs/blobfs.h
+++ b/src/storage/blobfs/blobfs.h
@@ -161,7 +161,7 @@
 
   BlobCache& Cache() { return blob_cache_; }
 
-  zx_status_t Readdir(fs::vdircookie_t* cookie, void* dirents, size_t len, size_t* out_actual);
+  zx_status_t Readdir(fs::VdirCookie* cookie, void* dirents, size_t len, size_t* out_actual);
 
   BlockDevice* Device() const { return block_device_.get(); }
 
diff --git a/src/storage/blobfs/directory.cc b/src/storage/blobfs/directory.cc
index fcacdb9..df12b2e 100644
--- a/src/storage/blobfs/directory.cc
+++ b/src/storage/blobfs/directory.cc
@@ -42,7 +42,7 @@
 
 fs::VnodeProtocolSet Directory::GetProtocols() const { return fs::VnodeProtocol::kDirectory; }
 
-zx_status_t Directory::Readdir(fs::vdircookie_t* cookie, void* dirents, size_t len,
+zx_status_t Directory::Readdir(fs::VdirCookie* cookie, void* dirents, size_t len,
                                size_t* out_actual) {
   return blobfs_->Readdir(cookie, dirents, len, out_actual);
 }
diff --git a/src/storage/blobfs/directory.h b/src/storage/blobfs/directory.h
index ece53ad..b9bf256 100644
--- a/src/storage/blobfs/directory.h
+++ b/src/storage/blobfs/directory.h
@@ -43,8 +43,7 @@
   zx_status_t GetNodeInfoForProtocol(fs::VnodeProtocol protocol, fs::Rights rights,
                                      fs::VnodeRepresentation* info) final;
   fs::VnodeProtocolSet GetProtocols() const final;
-  zx_status_t Readdir(fs::vdircookie_t* cookie, void* dirents, size_t len,
-                      size_t* out_actual) final;
+  zx_status_t Readdir(fs::VdirCookie* cookie, void* dirents, size_t len, size_t* out_actual) final;
   zx_status_t Read(void* data, size_t len, size_t off, size_t* out_actual) final;
   zx_status_t Write(const void* data, size_t len, size_t offset, size_t* out_actual) final;
   zx_status_t Append(const void* data, size_t len, size_t* out_end, size_t* out_actual) final;
diff --git a/src/storage/factory/factoryfs/directory.cc b/src/storage/factory/factoryfs/directory.cc
index c3b9343..3f19659 100644
--- a/src/storage/factory/factoryfs/directory.cc
+++ b/src/storage/factory/factoryfs/directory.cc
@@ -27,7 +27,7 @@
   return ZX_ERR_NOT_SUPPORTED;
 }
 
-zx_status_t Directory::Readdir(fs::vdircookie_t* cookie, void* dirents, size_t len,
+zx_status_t Directory::Readdir(fs::VdirCookie* cookie, void* dirents, size_t len,
                                size_t* out_actual) {
   // TODO(manalib)
   return ZX_ERR_NOT_SUPPORTED;
diff --git a/src/storage/factory/factoryfs/directory.h b/src/storage/factory/factoryfs/directory.h
index 647e181..3415e54 100644
--- a/src/storage/factory/factoryfs/directory.h
+++ b/src/storage/factory/factoryfs/directory.h
@@ -53,8 +53,7 @@
   zx_status_t Rename(fbl::RefPtr<fs::Vnode> newdir, fbl::StringPiece oldname,
                      fbl::StringPiece newname, bool src_must_be_dir, bool dst_must_be_dir) final;
 
-  zx_status_t Readdir(fs::vdircookie_t* cookie, void* dirents, size_t len,
-                      size_t* out_actual) final;
+  zx_status_t Readdir(fs::VdirCookie* cookie, void* dirents, size_t len, size_t* out_actual) final;
   zx_status_t Read(void* data, size_t len, size_t off, size_t* out_actual) final;
   zx_status_t Write(const void* data, size_t len, size_t offset, size_t* out_actual) final;
   zx_status_t Append(const void* data, size_t len, size_t* out_end, size_t* out_actual) final;
diff --git a/src/storage/minfs/directory.cc b/src/storage/minfs/directory.cc
index 573bfbd..d1c9409 100644
--- a/src/storage/minfs/directory.cc
+++ b/src/storage/minfs/directory.cc
@@ -528,10 +528,10 @@
   uint32_t seqno;     // inode seq no
 };
 
-static_assert(sizeof(DirCookie) <= sizeof(fs::vdircookie_t),
+static_assert(sizeof(DirCookie) <= sizeof(fs::VdirCookie),
               "MinFS DirCookie too large to fit in IO state");
 
-zx_status_t Directory::Readdir(fs::vdircookie_t* cookie, void* dirents, size_t len,
+zx_status_t Directory::Readdir(fs::VdirCookie* cookie, void* dirents, size_t len,
                                size_t* out_actual) {
   TRACE_DURATION("minfs", "Directory::Readdir");
   FX_LOGS(DEBUG) << "minfs_readdir() vn=" << this << "(#" << GetIno() << ") cookie=" << cookie
diff --git a/src/storage/minfs/directory.h b/src/storage/minfs/directory.h
index 4773d21..7384e37 100644
--- a/src/storage/minfs/directory.h
+++ b/src/storage/minfs/directory.h
@@ -47,8 +47,7 @@
   zx_status_t Read(void* data, size_t len, size_t off, size_t* out_actual) final;
   zx_status_t Write(const void* data, size_t len, size_t offset, size_t* out_actual) final;
   zx_status_t Append(const void* data, size_t len, size_t* out_end, size_t* out_actual) final;
-  zx_status_t Readdir(fs::vdircookie_t* cookie, void* dirents, size_t len,
-                      size_t* out_actual) final;
+  zx_status_t Readdir(fs::VdirCookie* cookie, void* dirents, size_t len, size_t* out_actual) final;
   zx_status_t Create(fbl::StringPiece name, uint32_t mode, fbl::RefPtr<fs::Vnode>* out) final;
   zx_status_t Unlink(fbl::StringPiece name, bool must_be_dir) final;
   zx_status_t Rename(fbl::RefPtr<fs::Vnode> newdir, fbl::StringPiece oldname,
diff --git a/src/storage/minfs/host.cc b/src/storage/minfs/host.cc
index ebdb07d..514d1d6 100644
--- a/src/storage/minfs/host.cc
+++ b/src/storage/minfs/host.cc
@@ -49,7 +49,7 @@
 struct HostFile {
   fbl::RefPtr<fs::Vnode> vn;
   uint64_t off = 0;
-  fs::vdircookie_t dircookie;
+  fs::VdirCookie dircookie;
 };
 
 constexpr int kMaxFd = 64;
@@ -268,7 +268,7 @@
   f->vn->Close();
   f->vn.reset();
   f->off = 0;
-  f->dircookie.Reset();
+  f->dircookie = fs::VdirCookie();
   return 0;
 }
 
@@ -443,7 +443,7 @@
 
   uint64_t magic = minfs::kMinfsMagic0;
   fbl::RefPtr<fs::Vnode> vn;
-  fs::vdircookie_t cookie = {};
+  fs::VdirCookie cookie;
   uint8_t* ptr = nullptr;
   uint8_t data[kDirBufSize] = {0};
   size_t size = 0;
diff --git a/src/storage/volume_image/fvm/BUILD.gn b/src/storage/volume_image/fvm/BUILD.gn
index 4b6260c..3757898 100644
--- a/src/storage/volume_image/fvm/BUILD.gn
+++ b/src/storage/volume_image/fvm/BUILD.gn
@@ -64,10 +64,6 @@
     "//third_party/googletest:gtest",
     "//zircon/public/lib/lz4",
   ]
-
-  # TODO(66402): UBSan has found an instance of undefined behavior in this target.
-  # Disable UBSan for this target temporarily until it is migrated into CI/CQ.
-  configs += [ "//build/config:temporarily_disable_ubsan_do_not_use" ]
 }
 
 test_data_image_path = "$target_out_dir/test_data.blk"
diff --git a/src/storage/volume_image/fvm/fvm_sparse_image_test.cc b/src/storage/volume_image/fvm/fvm_sparse_image_test.cc
index 72b8f12..a057561 100644
--- a/src/storage/volume_image/fvm/fvm_sparse_image_test.cc
+++ b/src/storage/volume_image/fvm/fvm_sparse_image_test.cc
@@ -355,59 +355,49 @@
 // structures and aligning them.  Things might have been a little easier if fvm::SparseImage was a
 // multiple of 8 bytes since it would have meant that fvm::PartitionDescriptor was 8 byte aligned
 // when it immediately follows the header, but we are where we are.
-struct alignas(8) AlignedSparseImage : fvm::SparseImage {
-  explicit AlignedSparseImage(const fvm::SparseImage& image) {
-    memcpy(this, &image, sizeof(*this));
+template<typename T>
+struct Aligner {
+  struct alignas(8) Aligned : T {};
+
+  Aligned operator ()(const T& in) {
+    Aligned out;
+    memcpy(&out, &in, sizeof(T));
+    return out;
   }
 };
 
 auto HeaderEq(const fvm::SparseImage& expected_header) {
-  using Header = AlignedSparseImage;
-  const Header header(expected_header);
-  return testing::AllOf(
-      testing::Field(&Header::header_length, testing::Eq(header.header_length)),
-      testing::Field(&Header::flags, testing::Eq(header.flags)),
-      testing::Field(&Header::magic, testing::Eq(header.magic)),
-      testing::Field(&Header::partition_count, testing::Eq(header.partition_count)),
-      testing::Field(&Header::slice_size, testing::Eq(header.slice_size)),
-      testing::Field(&Header::maximum_disk_size, testing::Eq(header.maximum_disk_size)),
-      testing::Field(&Header::version, testing::Eq(header.version)));
+  using Header = fvm::SparseImage;
+  return testing::ResultOf(Aligner<Header>(), testing::AllOf(
+      testing::Field(&Header::header_length, testing::Eq(expected_header.header_length)),
+      testing::Field(&Header::flags, testing::Eq(expected_header.flags)),
+      testing::Field(&Header::magic, testing::Eq(expected_header.magic)),
+      testing::Field(&Header::partition_count, testing::Eq(expected_header.partition_count)),
+      testing::Field(&Header::slice_size, testing::Eq(expected_header.slice_size)),
+      testing::Field(&Header::maximum_disk_size, testing::Eq(expected_header.maximum_disk_size)),
+      testing::Field(&Header::version, testing::Eq(expected_header.version))));
 }
 
-struct alignas(8) AlignedPartitionDescriptor : fvm::PartitionDescriptor {
-  explicit AlignedPartitionDescriptor(const fvm::PartitionDescriptor& descriptor) {
-    memcpy(this, &descriptor, sizeof(*this));
-  }
-};
-
 auto PartitionDescriptorEq(const fvm::PartitionDescriptor& expected_descriptor) {
-  using PartitionDescriptor = AlignedPartitionDescriptor;
-  const PartitionDescriptor descriptor(expected_descriptor);
-  return testing::AllOf(
-      testing::Field(&PartitionDescriptor::magic, testing::Eq(descriptor.magic)),
-      testing::Field(&PartitionDescriptor::flags, testing::Eq(descriptor.flags)),
-      testing::Field(&PartitionDescriptor::name, testing::ElementsAreArray(descriptor.name)),
-      testing::Field(&PartitionDescriptor::type, testing::ElementsAreArray(descriptor.type)));
+  using fvm::PartitionDescriptor;
+  return testing::ResultOf(Aligner<PartitionDescriptor>(), testing::AllOf(
+      testing::Field(&PartitionDescriptor::magic, testing::Eq(expected_descriptor.magic)),
+      testing::Field(&PartitionDescriptor::flags, testing::Eq(expected_descriptor.flags)),
+      testing::Field(&PartitionDescriptor::name, testing::ElementsAreArray(expected_descriptor.name)),
+      testing::Field(&PartitionDescriptor::type, testing::ElementsAreArray(expected_descriptor.type))));
 }
 
 auto PartitionDescriptorMatchesEntry(const FvmSparsePartitionEntry& expected_descriptor) {
-  return PartitionDescriptorEq(AlignedPartitionDescriptor(expected_descriptor.descriptor));
+  return PartitionDescriptorEq(expected_descriptor.descriptor);
 }
 
-struct alignas(8) AlignedExtentDescriptor : fvm::ExtentDescriptor {
-  explicit AlignedExtentDescriptor(const fvm::ExtentDescriptor& descriptor) {
-    memcpy(this, &descriptor, sizeof(*this));
-  }
-};
-
 [[maybe_unused]] auto ExtentDescriptorEq(const fvm::ExtentDescriptor& expected_descriptor) {
-  using ExtentDescriptor = AlignedExtentDescriptor;
-  const ExtentDescriptor descriptor(expected_descriptor);
-  return testing::AllOf(
-      testing::Field(&ExtentDescriptor::magic, testing::Eq(descriptor.magic)),
-      testing::Field(&ExtentDescriptor::slice_start, testing::Eq(descriptor.slice_start)),
-      testing::Field(&ExtentDescriptor::slice_count, testing::Eq(descriptor.slice_count)),
-      testing::Field(&ExtentDescriptor::extent_length, testing::Eq(descriptor.extent_length)));
+  using fvm::ExtentDescriptor;
+  return testing::ResultOf(Aligner<ExtentDescriptor>(), testing::AllOf(
+      testing::Field(&ExtentDescriptor::magic, testing::Eq(expected_descriptor.magic)),
+      testing::Field(&ExtentDescriptor::slice_start, testing::Eq(expected_descriptor.slice_start)),
+      testing::Field(&ExtentDescriptor::slice_count, testing::Eq(expected_descriptor.slice_count)),
+      testing::Field(&ExtentDescriptor::extent_length, testing::Eq(expected_descriptor.extent_length))));
 }
 
 MATCHER(ExtentDescriptorsAreEq, "Compares to Extent Descriptors") {
@@ -989,7 +979,7 @@
   std::unique_ptr<fvm::SparseReader> sparse_reader = nullptr;
   // This verifies metadata(header, partition descriptors and extent descriptors.)
   ASSERT_EQ(ZX_OK, fvm::SparseReader::Create(std::move(sparse_reader_impl), &sparse_reader));
-  ASSERT_THAT(sparse_reader->Image(), HeaderEq(container.serialized_image().header));
+  ASSERT_THAT(sparse_reader->Image(), Pointee(HeaderEq(container.serialized_image().header)));
 
   // Partition 1 metadata.
   {
@@ -1055,7 +1045,7 @@
   std::unique_ptr<fvm::SparseReader> sparse_reader = nullptr;
   // This verifies metadata(header, partition descriptors and extent descriptors.)
   ASSERT_EQ(ZX_OK, fvm::SparseReader::Create(std::move(sparse_reader_impl), &sparse_reader));
-  ASSERT_THAT(sparse_reader->Image(), HeaderEq(container.serialized_image().header));
+  ASSERT_THAT(sparse_reader->Image(), Pointee(HeaderEq(container.serialized_image().header)));
 
   // Partition 1 metadata.
   {
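
The rewrite above folds the per-type Aligned* wrappers into a single Aligner functor chained through gMock's testing::ResultOf, which applies a callable to the matched value before the inner matcher runs. Below is a self-contained sketch of that pattern using a made-up Header type rather than the fvm structures; it shows the copy-into-an-aligned-temporary step happening inside the matcher.

```cpp
#include <cstdint>
#include <cstring>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

// Hypothetical stand-in for a header that may be read from a byte buffer at an
// arbitrary offset, and therefore at an arbitrary alignment.
struct Header {
  uint64_t magic;
  uint32_t flags;
};

// Copies the argument into a freshly constructed (and therefore properly
// aligned) Header before any Field() matcher dereferences its members.
Header CopyAligned(const Header& in) {
  Header out;
  std::memcpy(&out, &in, sizeof(Header));
  return out;
}

auto HeaderEq(const Header& expected) {
  using testing::AllOf;
  using testing::Eq;
  using testing::Field;
  using testing::ResultOf;
  // ResultOf(f, m): the inner matcher m is applied to f(actual_value).
  return ResultOf(CopyAligned, AllOf(Field(&Header::magic, Eq(expected.magic)),
                                     Field(&Header::flags, Eq(expected.flags))));
}

TEST(ResultOfSketch, MatchesHeaderFields) {
  Header header{0x1234u, 7u};
  EXPECT_THAT(header, HeaderEq(header));
}
```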
diff --git a/src/sys/appmgr/BUILD.gn b/src/sys/appmgr/BUILD.gn
index db53162..74a1cd3 100644
--- a/src/sys/appmgr/BUILD.gn
+++ b/src/sys/appmgr/BUILD.gn
@@ -210,14 +210,9 @@
   configs += [ "//build/config:Wno-conversion" ]
 }
 
-fuchsia_component("component") {
-  component_name = "appmgr"
-  deps = [ ":bin" ]
+fuchsia_package_with_single_component("appmgr") {
   manifest = "meta/appmgr.cml"
-}
-
-fuchsia_package("appmgr") {
-  deps = [ ":component" ]
+  deps = [ ":bin" ]
 }
 
 config_data("appmgr_scheme_config") {
diff --git a/src/sys/appmgr/component_controller_unittest.cc b/src/sys/appmgr/component_controller_unittest.cc
index 3dfbff3..0caf953 100644
--- a/src/sys/appmgr/component_controller_unittest.cc
+++ b/src/sys/appmgr/component_controller_unittest.cc
@@ -117,7 +117,7 @@
   // Arbitrary size.
   uint8_t buffer[4096];
 
-  fs::vdircookie_t cookie{};
+  fs::VdirCookie cookie;
   // Actual number of bytes read into the buffer.
   size_t real_len = 0;
   while (dir->Readdir(&cookie, buffer, sizeof(buffer), &real_len) == ZX_OK && real_len > 0) {
diff --git a/src/sys/appmgr/service_provider_dir_impl.cc b/src/sys/appmgr/service_provider_dir_impl.cc
index 70445f7..156055d 100644
--- a/src/sys/appmgr/service_provider_dir_impl.cc
+++ b/src/sys/appmgr/service_provider_dir_impl.cc
@@ -118,7 +118,7 @@
   return root_->GetAttributes(a);
 }
 
-zx_status_t ServiceProviderDirImpl::Readdir(fs::vdircookie_t* cookie, void* dirents, size_t len,
+zx_status_t ServiceProviderDirImpl::Readdir(fs::VdirCookie* cookie, void* dirents, size_t len,
                                             size_t* out_actual) {
   return root_->Readdir(cookie, dirents, len, out_actual);
 }
diff --git a/src/sys/appmgr/service_provider_dir_impl.h b/src/sys/appmgr/service_provider_dir_impl.h
index 0e3a94b..20b987eb 100644
--- a/src/sys/appmgr/service_provider_dir_impl.h
+++ b/src/sys/appmgr/service_provider_dir_impl.h
@@ -65,8 +65,7 @@
 
   zx_status_t GetAttributes(fs::VnodeAttributes* a) final;
 
-  zx_status_t Readdir(fs::vdircookie_t* cookie, void* dirents, size_t len,
-                      size_t* out_actual) final;
+  zx_status_t Readdir(fs::VdirCookie* cookie, void* dirents, size_t len, size_t* out_actual) final;
 
   zx_status_t GetNodeInfoForProtocol(fs::VnodeProtocol protocol, fs::Rights rights,
                                      fs::VnodeRepresentation* representation) final;
diff --git a/src/sys/appmgr/service_provider_dir_unittest.cc b/src/sys/appmgr/service_provider_dir_unittest.cc
index 3ca0d1e..af0e5e8 100644
--- a/src/sys/appmgr/service_provider_dir_unittest.cc
+++ b/src/sys/appmgr/service_provider_dir_unittest.cc
@@ -2,11 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include <test/placeholders/cpp/fidl.h>
-
 #include <fs/service.h>
 #include <fs/synchronous_vfs.h>
 #include <gtest/gtest.h>
+#include <test/placeholders/cpp/fidl.h>
 
 #include "lib/gtest/real_loop_fixture.h"
 #include "src/lib/fxl/strings/substitute.h"
@@ -210,7 +209,7 @@
   AddService(&service_provider, "my", 2);
   AddService(&service_provider, "my", 3);
 
-  fs::vdircookie_t cookie = {};
+  fs::VdirCookie cookie;
   uint8_t buffer[kBufSz];
   size_t len;
   {
@@ -238,7 +237,7 @@
   AddService(parent_service_provider.get(), "parent", 2);
   service_provider.set_parent(parent_service_provider);
 
-  fs::vdircookie_t cookie = {};
+  fs::VdirCookie cookie;
   uint8_t buffer[kBufSz];
   size_t len;
   {
diff --git a/src/sys/build/components.gni b/src/sys/build/components.gni
index db94f7e..b37f269 100644
--- a/src/sys/build/components.gni
+++ b/src/sys/build/components.gni
@@ -9,6 +9,7 @@
 # See: https://fuchsia.dev/fuchsia-src/development/components/build
 import("fuchsia_component.gni")
 import("fuchsia_package.gni")
+import("fuchsia_package_with_single_component.gni")
 import("fuchsia_test.gni")
 import("fuchsia_test_package.gni")
 import("fuchsia_unittest_component.gni")
diff --git a/src/sys/build/fuchsia_package_with_single_component.gni b/src/sys/build/fuchsia_package_with_single_component.gni
new file mode 100644
index 0000000..ef175d2
--- /dev/null
+++ b/src/sys/build/fuchsia_package_with_single_component.gni
@@ -0,0 +1,92 @@
+# Copyright 2020 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("fuchsia_component.gni")
+import("fuchsia_package.gni")
+
+# Defines a package that contains a single component.
+# See: https://fuchsia.dev/fuchsia-src/development/components/build
+#
+# Developers often define a package that contains a single component.
+# This template fuses together fuchsia_package() and fuchsia_component() as a
+# convenience.
+#
+# Packages are units of distribution. It is beneficial to define multiple
+# components in the same package if you need to guarantee that several
+# components are always co-present, or if you'd like to be able to update
+# several components at once (by updating a single package).
+# This pattern is also commonly used to create hermetic integration tests.
+# For instance, an integration test between two components, where one is a
+# client of a service implemented in the other, would include both the client
+# and server components.
+# However, for the sake of simplicity, if you're developing a package with
+# just a single component, this template will save you some boilerplate.
+#
+# Example:
+# ```
+# executable("rot13_encoder_decoder") {
+#   sources = [ "rot13_encoder_decoder.cc" ]
+# }
+#
+# fuchsia_package_with_single_component("rot13") {
+#   manifest = "meta/rot13.cmx"
+#   deps = [ ":rot13_encoder_decoder" ]
+# }
+# ```
+#
+# Parameters
+#
+#   package_name (optional)
+#     The name of the package.
+#     Type: string
+#     Default: target_name
+#
+#   component_name (optional)
+#     The name of the component.
+#     Type: string
+#     Default: target_name
+#
+#   manifest (required)
+#     The component manifest.
+#     Type: path
+#
+#   deps
+#   testonly
+#   visibility
+template("fuchsia_package_with_single_component") {
+  assert(
+      defined(invoker.manifest),
+      "A `manifest` argument was missing when calling fuchsia_package_with_single_component($target_name)")
+
+  package_name = target_name
+  if (defined(invoker.package_name)) {
+    package_name = invoker.package_name
+  }
+  component_name = target_name
+  if (defined(invoker.component_name)) {
+    component_name = invoker.component_name
+  }
+
+  component_target = "${target_name}_component"
+  fuchsia_component(component_target) {
+    forward_variables_from(invoker,
+                           [
+                             "deps",
+                             "manifest",
+                             "testonly",
+                           ])
+    component_name = component_name
+    visibility = [ ":*" ]
+  }
+
+  fuchsia_package(target_name) {
+    forward_variables_from(invoker,
+                           [
+                             "testonly",
+                             "visibility",
+                           ])
+    package_name = package_name
+    deps = [ ":$component_target" ]
+  }
+}
diff --git a/src/sys/lib/cm_fidl_validator/BUILD.gn b/src/sys/lib/cm_fidl_validator/BUILD.gn
index 34fc7af..ef601da2 100644
--- a/src/sys/lib/cm_fidl_validator/BUILD.gn
+++ b/src/sys/lib/cm_fidl_validator/BUILD.gn
@@ -3,7 +3,7 @@
 # found in the LICENSE file.
 
 import("//build/rust/rustc_library.gni")
-import("//build/test/test_package.gni")
+import("//src/sys/build/components.gni")
 
 rustc_library("cm_fidl_validator") {
   with_unit_tests = true
@@ -26,15 +26,9 @@
   sources = [ "src/lib.rs" ]
 }
 
-test_package("cm_fidl_validator_tests") {
+fuchsia_unittest_package("cm_fidl_validator_tests") {
+  manifest = "meta/cm_fidl_validator_tests.cmx"
   deps = [ ":cm_fidl_validator_test" ]
-
-  tests = [
-    {
-      name = "cm_fidl_validator_lib_test"
-      dest = "cm_fidl_validator_tests"
-    },
-  ]
 }
 
 group("tests") {
diff --git a/src/sys/lib/cm_fidl_validator/meta/cm_fidl_validator_tests.cmx b/src/sys/lib/cm_fidl_validator/meta/cm_fidl_validator_tests.cmx
index 202dee7..b4ec20c 100644
--- a/src/sys/lib/cm_fidl_validator/meta/cm_fidl_validator_tests.cmx
+++ b/src/sys/lib/cm_fidl_validator/meta/cm_fidl_validator_tests.cmx
@@ -3,6 +3,6 @@
         "sdk/lib/diagnostics/syslog/client.shard.cmx"
     ],
     "program": {
-        "binary": "test/cm_fidl_validator_tests"
+        "binary": "bin/cm_fidl_validator_lib_test"
     }
 }
diff --git a/src/sys/lib/cm_json/BUILD.gn b/src/sys/lib/cm_json/BUILD.gn
index d4ac6a9..3b4752c 100644
--- a/src/sys/lib/cm_json/BUILD.gn
+++ b/src/sys/lib/cm_json/BUILD.gn
@@ -3,8 +3,8 @@
 # found in the LICENSE file.
 
 import("//build/rust/rustc_library.gni")
-import("//build/test/test_package.gni")
 import("//build/testing/environments.gni")
+import("//src/sys/build/components.gni")
 
 source_set("cmx_schema_json") {
   inputs = [ "cmx_schema.json" ]
@@ -31,16 +31,12 @@
   inputs = [ "cmx_schema.json" ]
 }
 
-test_package("cm_json_tests") {
+fuchsia_unittest_package("cm_json_tests") {
+  manifest = "meta/cm_json_tests.cmx"
   deps = [ ":cm_json_test" ]
-
-  tests = [
-    {
-      name = "cm_json_lib_test"
-      dest = "cm_json_tests"
-      environments = basic_envs
-    },
-  ]
+  test_specs = {
+    environments = basic_envs
+  }
 }
 
 group("tests") {
diff --git a/src/sys/lib/cm_json/meta/cm_json_tests.cmx b/src/sys/lib/cm_json/meta/cm_json_tests.cmx
index 4ec1d72..65788c4 100644
--- a/src/sys/lib/cm_json/meta/cm_json_tests.cmx
+++ b/src/sys/lib/cm_json/meta/cm_json_tests.cmx
@@ -3,6 +3,6 @@
         "sdk/lib/diagnostics/syslog/client.shard.cmx"
     ],
     "program": {
-        "binary": "test/cm_json_tests"
+        "binary": "bin/cm_json_lib_test"
     }
 }
diff --git a/src/sys/lib/cm_rust/BUILD.gn b/src/sys/lib/cm_rust/BUILD.gn
index ef8a336..a76caee 100644
--- a/src/sys/lib/cm_rust/BUILD.gn
+++ b/src/sys/lib/cm_rust/BUILD.gn
@@ -3,8 +3,8 @@
 # found in the LICENSE file.
 
 import("//build/rust/rustc_library.gni")
-import("//build/test/test_package.gni")
 import("//build/testing/environments.gni")
+import("//src/sys/build/components.gni")
 
 rustc_library("cm_rust") {
   with_unit_tests = true
@@ -27,16 +27,12 @@
   ]
 }
 
-test_package("cm_rust_tests") {
+fuchsia_unittest_package("cm_rust_tests") {
+  manifest = "meta/cm_rust_tests.cmx"
   deps = [ ":cm_rust_test" ]
-
-  tests = [
-    {
-      name = "cm_rust_lib_test"
-      dest = "cm_rust_tests"
-      environments = basic_envs
-    },
-  ]
+  test_specs = {
+    environments = basic_envs
+  }
 }
 
 group("tests") {
diff --git a/src/sys/lib/cm_rust/meta/cm_rust_tests.cmx b/src/sys/lib/cm_rust/meta/cm_rust_tests.cmx
index c7149c7..33e9fd9 100644
--- a/src/sys/lib/cm_rust/meta/cm_rust_tests.cmx
+++ b/src/sys/lib/cm_rust/meta/cm_rust_tests.cmx
@@ -3,6 +3,6 @@
         "sdk/lib/diagnostics/syslog/client.shard.cmx"
     ],
     "program": {
-        "binary": "test/cm_rust_tests"
+        "binary": "bin/cm_rust_lib_test"
     }
 }
diff --git a/src/sys/lib/cm_types/BUILD.gn b/src/sys/lib/cm_types/BUILD.gn
index 152f82a1..4034f60 100644
--- a/src/sys/lib/cm_types/BUILD.gn
+++ b/src/sys/lib/cm_types/BUILD.gn
@@ -3,8 +3,8 @@
 # found in the LICENSE file.
 
 import("//build/rust/rustc_library.gni")
-import("//build/test/test_package.gni")
 import("//build/testing/environments.gni")
+import("//src/sys/build/components.gni")
 
 rustc_library("cm_types") {
   with_unit_tests = true
@@ -21,16 +21,12 @@
   sources = [ "src/lib.rs" ]
 }
 
-test_package("cm_types_tests") {
+fuchsia_unittest_package("cm_types_tests") {
+  manifest = "meta/cm_types_tests.cmx"
   deps = [ ":cm_types_test" ]
-
-  tests = [
-    {
-      name = "cm_types_lib_test"
-      dest = "cm_types_tests"
-      environments = basic_envs
-    },
-  ]
+  test_specs = {
+    environments = basic_envs
+  }
 }
 
 group("tests") {
diff --git a/src/sys/lib/cm_types/meta/cm_types_tests.cmx b/src/sys/lib/cm_types/meta/cm_types_tests.cmx
index 1b8ef33..2c5816b 100644
--- a/src/sys/lib/cm_types/meta/cm_types_tests.cmx
+++ b/src/sys/lib/cm_types/meta/cm_types_tests.cmx
@@ -3,6 +3,6 @@
         "sdk/lib/diagnostics/syslog/client.shard.cmx"
     ],
     "program": {
-        "binary": "test/cm_types_tests"
+        "binary": "bin/cm_types_lib_test"
     }
 }
diff --git a/src/sys/lib/component_id_index/BUILD.gn b/src/sys/lib/component_id_index/BUILD.gn
index e685496..08ba153 100644
--- a/src/sys/lib/component_id_index/BUILD.gn
+++ b/src/sys/lib/component_id_index/BUILD.gn
@@ -3,8 +3,8 @@
 # found in the LICENSE file.
 
 import("//build/rust/rustc_library.gni")
-import("//build/test/test_package.gni")
 import("//build/testing/environments.gni")
+import("//src/sys/build/components.gni")
 
 rustc_library("component_id_index") {
   with_unit_tests = true
@@ -27,16 +27,12 @@
   ]
 }
 
-test_package("component-id-index-tests") {
+fuchsia_unittest_package("component-id-index-tests") {
+  manifest = "meta/component-id-index-tests.cmx"
   deps = [ ":component_id_index_test" ]
-
-  tests = [
-    {
-      name = "component_id_index_lib_test"
-      dest = "component_id_index_tests"
-      environments = basic_envs
-    },
-  ]
+  test_specs = {
+    environments = basic_envs
+  }
 }
 
 group("tests") {
diff --git a/src/sys/lib/component_id_index/meta/component_id_index_tests.cmx b/src/sys/lib/component_id_index/meta/component-id-index-tests.cmx
similarity index 66%
copy from src/sys/lib/component_id_index/meta/component_id_index_tests.cmx
copy to src/sys/lib/component_id_index/meta/component-id-index-tests.cmx
index af5d0ec..7ac8ed4 100644
--- a/src/sys/lib/component_id_index/meta/component_id_index_tests.cmx
+++ b/src/sys/lib/component_id_index/meta/component-id-index-tests.cmx
@@ -3,6 +3,6 @@
         "sdk/lib/diagnostics/syslog/client.shard.cmx"
     ],
     "program": {
-        "binary": "test/component_id_index_tests"
+        "binary": "bin/component_id_index_lib_test"
     }
 }
diff --git a/src/sys/lib/fidl-connector/BUILD.gn b/src/sys/lib/fidl-connector/BUILD.gn
index 66d6379..a335b56 100644
--- a/src/sys/lib/fidl-connector/BUILD.gn
+++ b/src/sys/lib/fidl-connector/BUILD.gn
@@ -4,8 +4,8 @@
 
 import("//build/fidl/fidl.gni")
 import("//build/rust/rustc_library.gni")
-import("//build/test/test_package.gni")
 import("//build/testing/environments.gni")
+import("//src/sys/build/components.gni")
 
 rustc_library("fidl-connector") {
   name = "fidl_connector"
@@ -30,15 +30,12 @@
   sources = [ "src/lib.rs" ]
 }
 
-unittest_package("fidl-connector-tests") {
+fuchsia_unittest_package("fidl-connector-tests") {
+  executable_path = "bin/fidl_connector_lib_test"
   deps = [ ":fidl-connector_test" ]
-
-  tests = [
-    {
-      name = "fidl_connector_lib_test"
-      environments = basic_envs
-    },
-  ]
+  test_specs = {
+    environments = basic_envs
+  }
 }
 
 fidl("test.fidl.connector") {
diff --git a/src/sys/lib/fidl-fuchsia-pkg-ext/BUILD.gn b/src/sys/lib/fidl-fuchsia-pkg-ext/BUILD.gn
index b36cb82..3663cb5 100644
--- a/src/sys/lib/fidl-fuchsia-pkg-ext/BUILD.gn
+++ b/src/sys/lib/fidl-fuchsia-pkg-ext/BUILD.gn
@@ -4,8 +4,8 @@
 
 import("//build/package.gni")
 import("//build/rust/rustc_library.gni")
-import("//build/test/test_package.gni")
 import("//build/testing/environments.gni")
+import("//src/sys/build/components.gni")
 import("//tools/fidl/measure-tape/measure_tape.gni")
 
 measure_tape("measure_fuchsia_pkg_index_entry") {
@@ -65,16 +65,12 @@
   ]
 }
 
-test_package("fidl-fuchsia-pkg-ext-tests") {
+fuchsia_unittest_package("fidl-fuchsia-pkg-ext-tests") {
+  manifest = "meta/fidl-fuchsia-pkg-ext-tests.cmx"
   deps = [ ":fidl-fuchsia-pkg-ext_test" ]
-
-  tests = [
-    {
-      name = "fidl_fuchsia_pkg_ext_lib_test"
-      dest = "fidl-fuchsia-pkg-ext-tests"
-      environments = basic_envs
-    },
-  ]
+  test_specs = {
+    environments = basic_envs
+  }
 }
 
 group("tests") {
diff --git a/src/sys/lib/fidl-fuchsia-pkg-ext/meta/fidl-fuchsia-pkg-ext-tests.cmx b/src/sys/lib/fidl-fuchsia-pkg-ext/meta/fidl-fuchsia-pkg-ext-tests.cmx
index ab713f2..3d5070c 100644
--- a/src/sys/lib/fidl-fuchsia-pkg-ext/meta/fidl-fuchsia-pkg-ext-tests.cmx
+++ b/src/sys/lib/fidl-fuchsia-pkg-ext/meta/fidl-fuchsia-pkg-ext-tests.cmx
@@ -3,6 +3,6 @@
         "sdk/lib/diagnostics/syslog/client.shard.cmx"
     ],
     "program": {
-        "binary": "test/fidl-fuchsia-pkg-ext-tests"
+        "binary": "bin/fidl_fuchsia_pkg_ext_lib_test"
     }
 }
diff --git a/src/sys/lib/fidl-fuchsia-pkg-rewrite-ext/BUILD.gn b/src/sys/lib/fidl-fuchsia-pkg-rewrite-ext/BUILD.gn
index 532f879..bdf01a4 100644
--- a/src/sys/lib/fidl-fuchsia-pkg-rewrite-ext/BUILD.gn
+++ b/src/sys/lib/fidl-fuchsia-pkg-rewrite-ext/BUILD.gn
@@ -4,8 +4,8 @@
 
 import("//build/package.gni")
 import("//build/rust/rustc_library.gni")
-import("//build/test/test_package.gni")
 import("//build/testing/environments.gni")
+import("//src/sys/build/components.gni")
 
 rustc_library("fidl-fuchsia-pkg-rewrite-ext") {
   version = "0.1.0"
@@ -33,16 +33,12 @@
   ]
 }
 
-test_package("fidl-fuchsia-pkg-rewrite-ext-tests") {
+fuchsia_unittest_package("fidl-fuchsia-pkg-rewrite-ext-tests") {
+  manifest = "meta/fidl-fuchsia-pkg-rewrite-ext-tests.cmx"
   deps = [ ":fidl-fuchsia-pkg-rewrite-ext_test" ]
-
-  tests = [
-    {
-      name = "fidl_fuchsia_pkg_rewrite_ext_lib_test"
-      dest = "fidl-fuchsia-pkg-rewrite-ext-tests"
-      environments = basic_envs
-    },
-  ]
+  test_specs = {
+    environments = basic_envs
+  }
 }
 
 group("tests") {
diff --git a/src/sys/lib/fidl-fuchsia-pkg-rewrite-ext/meta/fidl-fuchsia-pkg-rewrite-ext-tests.cmx b/src/sys/lib/fidl-fuchsia-pkg-rewrite-ext/meta/fidl-fuchsia-pkg-rewrite-ext-tests.cmx
index e3f08e6..e95051a5 100644
--- a/src/sys/lib/fidl-fuchsia-pkg-rewrite-ext/meta/fidl-fuchsia-pkg-rewrite-ext-tests.cmx
+++ b/src/sys/lib/fidl-fuchsia-pkg-rewrite-ext/meta/fidl-fuchsia-pkg-rewrite-ext-tests.cmx
@@ -3,6 +3,6 @@
         "sdk/lib/diagnostics/syslog/client.shard.cmx"
     ],
     "program": {
-        "binary": "test/fidl-fuchsia-pkg-rewrite-ext-tests"
+        "binary": "bin/fidl_fuchsia_pkg_rewrite_ext_lib_test"
     }
 }
diff --git a/src/sys/lib/fuchsia-bootfs/BUILD.gn b/src/sys/lib/fuchsia-bootfs/BUILD.gn
index 262f9cb..7a3595c 100644
--- a/src/sys/lib/fuchsia-bootfs/BUILD.gn
+++ b/src/sys/lib/fuchsia-bootfs/BUILD.gn
@@ -2,8 +2,8 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 import("//build/rust/rustc_library.gni")
-import("//build/test/test_package.gni")
 import("//build/testing/environments.gni")
+import("//src/sys/build/components.gni")
 
 rustc_library("fuchsia-bootfs") {
   name = "fuchsia_bootfs"
@@ -27,39 +27,34 @@
   ]
 }
 
-test_package("fuchsia-bootfs-tests") {
-  deps = [ ":fuchsia-bootfs_test" ]
+resource("testdata_bootfs") {
+  sources = [ "testdata/basic.bootfs.uncompressed" ]
+  outputs = [ "data/basic.bootfs.uncompressed" ]
+}
 
-  resources = [
-    {
-      path = rebase_path("testdata/basic.bootfs.uncompresssed")
-      dest = "basic.bootfs.uncompresssed"
-    },
-    {
-      path = rebase_path("testdata/input/dir/empty")
-      dest = "golden/dir/empty"
-    },
-    {
-      path = rebase_path("testdata/input/dir/lorem.txt")
-      dest = "golden/dir/lorem.txt"
-    },
-    {
-      path = rebase_path("testdata/input/empty")
-      dest = "golden/empty"
-    },
-    {
-      path = rebase_path("testdata/input/random.dat")
-      dest = "golden/random.dat"
-    },
-    {
-      path = rebase_path("testdata/input/simple.txt")
-      dest = "golden/simple.txt"
-    },
+resource("testdata_golden_subdir") {
+  sources = [
+    "testdata/input/dir/empty",
+    "testdata/input/dir/lorem.txt",
   ]
+  outputs = [ "data/golden/dir/{{source_file_part}}" ]
+}
 
-  tests = [
-    {
-      name = "fuchsia_bootfs_lib_test"
-    },
+resource("testdata_golden_root") {
+  sources = [
+    "testdata/input/empty",
+    "testdata/input/random.dat",
+    "testdata/input/simple.txt",
+  ]
+  outputs = [ "data/golden/{{source_file_part}}" ]
+}
+
+fuchsia_unittest_package("fuchsia-bootfs-tests") {
+  manifest = "meta/fuchsia_bootfs_lib_test.cmx"
+  deps = [
+    ":fuchsia-bootfs_test",
+    ":testdata_bootfs",
+    ":testdata_golden_root",
+    ":testdata_golden_subdir",
   ]
 }
diff --git a/src/sys/lib/fuchsia-bootfs/meta/fuchsia_bootfs_lib_test.cmx b/src/sys/lib/fuchsia-bootfs/meta/fuchsia_bootfs_lib_test.cmx
index 49e5e3e..240b6d8 100644
--- a/src/sys/lib/fuchsia-bootfs/meta/fuchsia_bootfs_lib_test.cmx
+++ b/src/sys/lib/fuchsia-bootfs/meta/fuchsia_bootfs_lib_test.cmx
@@ -3,6 +3,6 @@
         "sdk/lib/diagnostics/syslog/client.shard.cmx"
     ],
     "program": {
-        "binary": "test/fuchsia_bootfs_lib_test"
+        "binary": "bin/fuchsia_bootfs_lib_test"
     }
 }
diff --git a/src/sys/lib/fuchsia-bootfs/src/lib.rs b/src/sys/lib/fuchsia-bootfs/src/lib.rs
index 7534b45..29bcc1d 100644
--- a/src/sys/lib/fuchsia-bootfs/src/lib.rs
+++ b/src/sys/lib/fuchsia-bootfs/src/lib.rs
@@ -237,7 +237,7 @@
     };
 
     static GOLDEN_DIR: &str = "/pkg/data/golden/";
-    static BASIC_BOOTFS_UNCOMPRESSED_FILE: &str = "/pkg/data/basic.bootfs.uncompresssed";
+    static BASIC_BOOTFS_UNCOMPRESSED_FILE: &str = "/pkg/data/basic.bootfs.uncompressed";
 
     fn read_file_into_hashmap(dir: &str, filename: &str, map: &mut HashMap<String, Vec<u8>>) {
         let mut file_buffer = Vec::new();
diff --git a/src/sys/lib/fuchsia-bootfs/testdata/basic.bootfs.uncompresssed b/src/sys/lib/fuchsia-bootfs/testdata/basic.bootfs.uncompressed
similarity index 100%
rename from src/sys/lib/fuchsia-bootfs/testdata/basic.bootfs.uncompresssed
rename to src/sys/lib/fuchsia-bootfs/testdata/basic.bootfs.uncompressed
Binary files differ
diff --git a/src/sys/lib/fuchsia_backoff/BUILD.gn b/src/sys/lib/fuchsia_backoff/BUILD.gn
index fe8b6e37..5d2bc4e 100644
--- a/src/sys/lib/fuchsia_backoff/BUILD.gn
+++ b/src/sys/lib/fuchsia_backoff/BUILD.gn
@@ -4,8 +4,8 @@
 
 import("//build/package.gni")
 import("//build/rust/rustc_library.gni")
-import("//build/test/test_package.gni")
 import("//build/testing/environments.gni")
+import("//src/sys/build/components.gni")
 
 rustc_library("fuchsia_backoff") {
   version = "0.1.0"
@@ -21,13 +21,10 @@
   sources = [ "src/lib.rs" ]
 }
 
-unittest_package("fuchsia_backoff_tests") {
+fuchsia_unittest_package("fuchsia_backoff_tests") {
+  executable_path = "bin/fuchsia_backoff_lib_test"
   deps = [ ":fuchsia_backoff_test" ]
-
-  tests = [
-    {
-      name = "fuchsia_backoff_lib_test"
-      environments = basic_envs
-    },
-  ]
+  test_specs = {
+    environments = basic_envs
+  }
 }
diff --git a/src/sys/lib/runner/BUILD.gn b/src/sys/lib/runner/BUILD.gn
index 0520b27..06369fc 100644
--- a/src/sys/lib/runner/BUILD.gn
+++ b/src/sys/lib/runner/BUILD.gn
@@ -2,10 +2,9 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import("//build/package.gni")
 import("//build/rust/rustc_library.gni")
-import("//build/test/test_package.gni")
 import("//build/testing/environments.gni")
+import("//src/sys/build/components.gni")
 
 rustc_library("runner") {
   version = "0.1.0"
@@ -42,16 +41,12 @@
   ]
 }
 
-test_package("runner_tests") {
+fuchsia_unittest_package("runner_tests") {
+  manifest = "meta/runner_tests.cmx"
   deps = [ ":runner_test" ]
-
-  tests = [
-    {
-      name = "runner_lib_test"
-      dest = "runner_tests"
-      environments = basic_envs
-    },
-  ]
+  test_specs = {
+    environments = basic_envs
+  }
 }
 
 group("tests") {
diff --git a/src/sys/lib/runner/meta/runner_tests.cmx b/src/sys/lib/runner/meta/runner_tests.cmx
index e5e6c64..b90a4c3 100644
--- a/src/sys/lib/runner/meta/runner_tests.cmx
+++ b/src/sys/lib/runner/meta/runner_tests.cmx
@@ -3,6 +3,6 @@
         "sdk/lib/diagnostics/syslog/client.shard.cmx"
     ],
     "program": {
-        "binary": "test/runner_tests"
+        "binary": "bin/runner_lib_test"
     }
 }
diff --git a/src/sys/lib/runner/src/component.rs b/src/sys/lib/runner/src/component.rs
index 50b33c3..bb8ca6a 100644
--- a/src/sys/lib/runner/src/component.rs
+++ b/src/sys/lib/runner/src/component.rs
@@ -800,7 +800,7 @@
             let ns = setup_namespace(true, vec![])?;
 
             let _launch_info = configure_launcher(LauncherConfigArgs {
-                bin_path: "test/runner_tests",
+                bin_path: "bin/runner_lib_test",
                 name: "name",
                 args: None,
                 ns: ns,
@@ -816,7 +816,7 @@
 
             let ls = recv.await?;
 
-            assert_eq!(ls.args, vec!("/pkg/test/runner_tests".to_owned()));
+            assert_eq!(ls.args, vec!("/pkg/bin/runner_lib_test".to_owned()));
 
             Ok(())
         }
@@ -830,7 +830,7 @@
             let args = vec!["args1".to_owned(), "arg2".to_owned()];
 
             let _launch_info = configure_launcher(LauncherConfigArgs {
-                bin_path: "test/runner_tests",
+                bin_path: "bin/runner_lib_test",
                 name: "name",
                 args: Some(args.clone()),
                 ns: ns,
@@ -846,7 +846,7 @@
 
             let ls = recv.await?;
 
-            let mut expected = vec!["/pkg/test/runner_tests".to_owned()];
+            let mut expected = vec!["/pkg/bin/runner_lib_test".to_owned()];
             expected.extend(args);
             assert_eq!(ls.args, expected);
 
@@ -860,7 +860,7 @@
             let ns = setup_namespace(true, vec!["/some_path1", "/some_path2"])?;
 
             let _launch_info = configure_launcher(LauncherConfigArgs {
-                bin_path: "test/runner_tests",
+                bin_path: "bin/runner_lib_test",
                 name: "name",
                 args: None,
                 ns: ns,
@@ -906,7 +906,7 @@
             }
 
             let _launch_info = configure_launcher(LauncherConfigArgs {
-                bin_path: "test/runner_tests",
+                bin_path: "bin/runner_lib_test",
                 name: "name",
                 args: None,
                 ns: ns,
@@ -940,7 +940,7 @@
             let ns = setup_namespace(true, vec![])?;
 
             let _launch_info = configure_launcher(LauncherConfigArgs {
-                bin_path: "test/runner_tests",
+                bin_path: "bin/runner_lib_test",
                 name: "name",
                 args: None,
                 ns: ns,
@@ -984,7 +984,7 @@
             }
 
             let _launch_info = configure_launcher(LauncherConfigArgs {
-                bin_path: "test/runner_tests",
+                bin_path: "bin/runner_lib_test",
                 name: "name",
                 args: None,
                 ns: ns,
diff --git a/src/sys/pkg/bin/omaha-client/src/metrics.rs b/src/sys/pkg/bin/omaha-client/src/metrics.rs
index a839439..e69a083 100644
--- a/src/sys/pkg/bin/omaha-client/src/metrics.rs
+++ b/src/sys/pkg/bin/omaha-client/src/metrics.rs
@@ -10,7 +10,7 @@
 use log::{info, warn};
 use omaha_client::{
     metrics::{ClockType, Metrics, MetricsReporter},
-    protocol::request::InstallSource,
+    protocol::request::{EventResult, EventType, InstallSource},
 };
 use std::{convert::TryFrom, time::Duration};
 
@@ -43,6 +43,60 @@
     }
 }
 
+fn mos_event_type_from_event_type(
+    t: EventType,
+) -> mos_metrics_registry::OmahaEventLostMetricDimensionEventType {
+    match t {
+        EventType::Unknown => mos_metrics_registry::OmahaEventLostMetricDimensionEventType::Unknown,
+        EventType::DownloadComplete => {
+            mos_metrics_registry::OmahaEventLostMetricDimensionEventType::DownloadComplete
+        }
+        EventType::InstallComplete => {
+            mos_metrics_registry::OmahaEventLostMetricDimensionEventType::InstallComplete
+        }
+        EventType::UpdateComplete => {
+            mos_metrics_registry::OmahaEventLostMetricDimensionEventType::UpdateComplete
+        }
+        EventType::UpdateDownloadStarted => {
+            mos_metrics_registry::OmahaEventLostMetricDimensionEventType::UpdateDownloadStarted
+        }
+        EventType::UpdateDownloadFinished => {
+            mos_metrics_registry::OmahaEventLostMetricDimensionEventType::UpdateDownloadFinished
+        }
+        EventType::RebootedAfterUpdate => {
+            mos_metrics_registry::OmahaEventLostMetricDimensionEventType::RebootedAfterUpdate
+        }
+    }
+}
+
+fn mos_event_result_from_event_result(
+    r: EventResult,
+) -> mos_metrics_registry::OmahaEventLostMetricDimensionEventResult {
+    match r {
+        EventResult::Error => {
+            mos_metrics_registry::OmahaEventLostMetricDimensionEventResult::Error
+        },
+        EventResult::Success => {
+            mos_metrics_registry::OmahaEventLostMetricDimensionEventResult::Success
+        },
+        EventResult::SuccessAndRestartRequired => {
+            mos_metrics_registry::OmahaEventLostMetricDimensionEventResult::SuccessAndRestartRequired
+        },
+        EventResult::SuccessAndAppRestartRequired => {
+            mos_metrics_registry::OmahaEventLostMetricDimensionEventResult::SuccessAndAppRestartRequired
+        },
+        EventResult::Cancelled => {
+            mos_metrics_registry::OmahaEventLostMetricDimensionEventResult::Cancelled
+        },
+        EventResult::ErrorInSystemInstaller => {
+            mos_metrics_registry::OmahaEventLostMetricDimensionEventResult::ErrorInSystemInstaller
+        },
+        EventResult::UpdateDeferred => {
+            mos_metrics_registry::OmahaEventLostMetricDimensionEventResult::UpdateDeferred
+        },
+    }
+}
+
 impl MetricsReporter for CobaltMetricsReporter {
     fn report_metrics(&mut self, metrics: Metrics) -> Result<(), Error> {
         info!("Reporting metrics to Cobalt: {:?}", metrics);
@@ -205,6 +259,16 @@
                     count as i64,
                 );
             }
+            Metrics::OmahaEventLost(event) => {
+                let event_type = mos_event_type_from_event_type(event.event_type);
+                let result = mos_event_result_from_event_result(event.event_result);
+                self.cobalt_sender.log_event_count(
+                    mos_metrics_registry::OMAHA_EVENT_LOST_METRIC_ID,
+                    (event_type, result),
+                    0,
+                    1,
+                );
+            }
         }
         Ok(())
     }
@@ -217,7 +281,7 @@
     use fidl_fuchsia_cobalt::{CountEvent, EventPayload};
     use fuchsia_async as fasync;
     use futures::stream::StreamExt;
-    use omaha_client::metrics::UpdateCheckFailureReason;
+    use omaha_client::{metrics::UpdateCheckFailureReason, protocol::request::Event};
     use std::time::Duration;
 
     async fn assert_metrics(metrics: Metrics, expected_events: &[CobaltEvent]) {
@@ -444,6 +508,62 @@
         .await;
     }
 
+    #[fasync::run_singlethreaded(test)]
+    async fn test_failed_boot_attempts() {
+        assert_metrics(
+            Metrics::FailedBootAttempts(42),
+            &[CobaltEvent {
+                metric_id: mos_metrics_registry::FAILED_BOOT_ATTEMPTS_METRIC_ID,
+                event_codes: vec![
+                    mos_metrics_registry::FailedBootAttemptsMetricDimensionResult::Success,
+                ]
+                .as_event_codes(),
+                component: None,
+                payload: EventPayload::EventCount(CountEvent {
+                    period_duration_micros: 0,
+                    count: 42,
+                }),
+            }],
+        )
+        .await;
+    }
+
+    #[fasync::run_singlethreaded(test)]
+    async fn test_omaha_event_lost() {
+        macro_rules! assert_lost_combo {
+            ($typeId:ident, $resId:ident) => {
+                assert_metrics(
+                    Metrics::OmahaEventLost(Event {
+                        event_type: EventType::$typeId,
+                        event_result: EventResult::$resId,
+                        ..Event::default()
+                    }),
+                    &[CobaltEvent {
+                        metric_id: mos_metrics_registry::OMAHA_EVENT_LOST_METRIC_ID,
+                        event_codes: (
+                            mos_metrics_registry::OmahaEventLostMetricDimensionEventType::$typeId,
+                            mos_metrics_registry::OmahaEventLostMetricDimensionEventResult::$resId,
+                        )
+                            .as_event_codes(),
+                        component: None,
+                        payload: EventPayload::EventCount(CountEvent {
+                            period_duration_micros: 0,
+                            count: 1,
+                        }),
+                    }],
+                )
+                .await;
+            };
+        }
+        assert_lost_combo!(Unknown, Error);
+        assert_lost_combo!(DownloadComplete, Success);
+        assert_lost_combo!(InstallComplete, SuccessAndRestartRequired);
+        assert_lost_combo!(UpdateComplete, SuccessAndAppRestartRequired);
+        assert_lost_combo!(UpdateDownloadStarted, Cancelled);
+        assert_lost_combo!(UpdateDownloadFinished, ErrorInSystemInstaller);
+        assert_lost_combo!(RebootedAfterUpdate, UpdateDeferred);
+    }
+
     #[test]
     fn test_duration_to_cobalt_metrics() {
         assert_eq!(duration_to_cobalt_micros(Duration::from_micros(0), "test"), Some(0));
diff --git a/src/sys/pkg/bin/omaha-client/src/policy.rs b/src/sys/pkg/bin/omaha-client/src/policy.rs
index f9e7003..d3d5e8f 100644
--- a/src/sys/pkg/bin/omaha-client/src/policy.rs
+++ b/src/sys/pkg/bin/omaha-client/src/policy.rs
@@ -642,9 +642,13 @@
         }
     }
 
+    // N.B. not using the Arbitrary impl for Duration here due to potential flake issues.
+    // See also https://fxrev.dev/464538 and https://github.com/AltSysrq/proptest/issues/221
+    // for context.
     prop_compose! {
-        fn arb_duration_up_to_percent_of_max(ratio: f32)(duration: Duration) -> Duration {
-            duration.mul_f32(ratio)
+        fn arb_duration_up_to_percent_of_max(ratio: f32)
+                                            (secs: u64, nsec in 0..1_000_000_000u32) -> Duration {
+            Duration::new(secs, nsec).mul_f32(ratio)
         }
     }
 
@@ -681,7 +685,9 @@
         fn test_fuzz_interval_lower_bounds(interval in arb_duration_up_to_percent_of_max(0.50),
             interval_fuzz_seed: u64,
             fuzz_percentage_range in 0u32..50u32) {
-            assert!(interval <= Duration::new(std::u64::MAX / 2, 0));
+            // Account for differences in integer vs single-precision arithmetic.
+            let epsilon = (u64::MAX as f32 * 0.50f32) as u64 - u64::MAX/2;
+            assert!(interval <= Duration::new(u64::MAX / 2 + epsilon, 0));
             let fuzzed_interval = fuzz_interval(interval, interval_fuzz_seed, fuzz_percentage_range);
 
             let lower_bound_multiplier = 1.0 - fuzz_percentage_range as f32 / 200.0;
@@ -693,7 +699,9 @@
         fn test_fuzz_interval_upper_bounds(interval in arb_duration_up_to_percent_of_max(0.75),
             interval_fuzz_seed: u64,
             fuzz_percentage_range in 0u32..25u32) {
-            assert!(interval <= Duration::new(std::u64::MAX / 4 * 3, 0));
+            // Account for differences in integer vs single-precision arithmetic.
+            let epsilon = (u64::MAX as f32 * 0.75f32) as u64 - u64::MAX/4*3;
+            assert!(interval <= Duration::new(u64::MAX / 4 * 3 + epsilon, 0));
             let fuzzed_interval = fuzz_interval(interval, interval_fuzz_seed, fuzz_percentage_range);
 
             let upper_bound_multiplier = 1.0 + fuzz_percentage_range as f32 / 200.0;
diff --git a/src/sys/pkg/bin/system-update-checker/src/apply.rs b/src/sys/pkg/bin/system-update-checker/src/apply.rs
index 6992fa0..316d96d 100644
--- a/src/sys/pkg/bin/system-update-checker/src/apply.rs
+++ b/src/sys/pkg/bin/system-update-checker/src/apply.rs
@@ -155,6 +155,8 @@
                 .context("notify installer it can reboot when ready")
                 .map_err(|e| (apply_progress, e))?;
             // On success, wait for reboot to happen.
+
+            fx_log_info!("Reboot contoller unblocked, waiting for reboot");
             let () = future::pending().await;
             unreachable!();
         }
diff --git a/src/sys/pkg/bin/system-updater/src/update.rs b/src/sys/pkg/bin/system-updater/src/update.rs
index f210d94..b737731 100644
--- a/src/sys/pkg/bin/system-updater/src/update.rs
+++ b/src/sys/pkg/bin/system-updater/src/update.rs
@@ -211,6 +211,7 @@
             .run(&mut co, &mut phase, &mut target_version)
             .await;
 
+        fx_log_info!("system update attempt completed, logging metrics");
         let status_code = metrics::result_to_status_code(attempt_res.as_ref().map(|_| ()));
         let target_build_version = target_version.build_version.to_string();
         cobalt.log_ota_result_attempt(
@@ -234,6 +235,7 @@
         }
 
         // wait for all cobalt events to be flushed to the service.
+        fx_log_info!("flushing cobalt events");
         let () = cobalt_forwarder_task.await;
 
         let (state, mode, _packages) = match attempt_res {
@@ -244,6 +246,7 @@
             }
         };
 
+        fx_log_info!("checking if reboot is required or should be deferred, mode: {:?}", mode);
         // Figure out if we should reboot.
         match mode {
             // First priority: Always reboot on ForceRecovery success, even if the caller
diff --git a/src/sys/pkg/bin/system-updater/src/update/channel.rs b/src/sys/pkg/bin/system-updater/src/update/channel.rs
index 4636967..0afb2bb 100644
--- a/src/sys/pkg/bin/system-updater/src/update/channel.rs
+++ b/src/sys/pkg/bin/system-updater/src/update/channel.rs
@@ -4,12 +4,13 @@
 
 use {
     anyhow::{Context, Error},
-    fuchsia_syslog::fx_log_err,
+    fuchsia_syslog::{fx_log_err, fx_log_info},
     std::fs,
 };
 
 /// Persists the current channel after a successful update.
 pub async fn update_current_channel() {
+    fx_log_info!("updating current channel");
     const TARGET_PATH: &str = "/misc/ota/target_channel.json";
     const CURRENT_TEMP_PATH: &str = "/misc/ota/current_channel.json.part";
     const CURRENT_PATH: &str = "/misc/ota/current_channel.json";
diff --git a/src/sys/pkg/lib/omaha-client/src/metrics.rs b/src/sys/pkg/lib/omaha-client/src/metrics.rs
index 5e6a60b..55aed03 100644
--- a/src/sys/pkg/lib/omaha-client/src/metrics.rs
+++ b/src/sys/pkg/lib/omaha-client/src/metrics.rs
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 
 use {
-    crate::protocol::request::InstallSource,
+    crate::protocol::request::{Event, InstallSource},
     anyhow::Error,
     std::{cell::RefCell, rc::Rc, time::Duration},
 };
@@ -42,8 +42,10 @@
     /// running that software, it is sent after the reboot (and includes the
     /// rebooting time).
     WaitedForRebootDuration(Duration),
-    /// Number of time an update failed to boot into new version.
+    /// Number of times an update failed to boot into new version.
     FailedBootAttempts(u64),
+    /// Record that an Omaha event report was lost.
+    OmahaEventLost(Event),
 }
 
 #[derive(Debug, Eq, PartialEq)]
diff --git a/src/sys/pkg/lib/omaha-client/src/state_machine.rs b/src/sys/pkg/lib/omaha-client/src/state_machine.rs
index 0a0454b..3409052 100644
--- a/src/sys/pkg/lib/omaha-client/src/state_machine.rs
+++ b/src/sys/pkg/lib/omaha-client/src/state_machine.rs
@@ -1048,6 +1048,7 @@
         }
         request_builder = request_builder.session_id(session_id.clone()).request_id(GUID::new());
         if let Err(e) = self.do_omaha_request_and_update_context(&request_builder, co).await {
+            self.report_metrics(Metrics::OmahaEventLost(event));
             warn!("Unable to report event to Omaha: {:?}", e);
         }
     }
@@ -1871,6 +1872,52 @@
     }
 
     #[test]
+    fn test_metrics_report_omaha_event_lost() {
+        block_on(async {
+            // This is sufficient to trigger a lost Omaha event as oneshot triggers an
+            // update check, which gets the invalid response (but hasn't checked the
+            // validity yet). This invalid response still contains an OK status, resulting
+            // in the UpdateCheckResponseTime and RequestsPerCheck events being generated
+            // reporting success.
+            //
+            // The response is then parsed and found to be invalid; the state machine then
+            // tries to send this parse error back to Omaha as an event carrying the
+            // ParseResponse error code. However, the MockHttpRequest has already consumed
+            // the one response it knew how to give; the event is reported via HTTP, but is
+            // "lost" because the mock responds with a 500 error when it has no responses
+            // left to return.
+            //
+            // That finally results in the OmahaEventLost.
+            let http = MockHttpRequest::new(HttpResponse::new("invalid response".into()));
+            let mut metrics_reporter = MockMetricsReporter::new();
+            let _response = StateMachineBuilder::new_stub()
+                .http(http)
+                .metrics_reporter(&mut metrics_reporter)
+                .oneshot()
+                .await;
+
+            // FIXME(https://github.com/rust-lang/rustfmt/issues/4530) rustfmt doesn't wrap slice
+            // patterns yet.
+            #[rustfmt::skip]
+            assert_matches!(
+                metrics_reporter.metrics.as_slice(),
+                [
+                    Metrics::UpdateCheckResponseTime { response_time: _, successful: true },
+                    Metrics::RequestsPerCheck { count: 1, successful: true },
+                    Metrics::OmahaEventLost(Event {
+                        event_type: EventType::UpdateComplete,
+                        event_result: EventResult::Error,
+                        errorcode: Some(EventErrorCode::ParseResponse),
+                        previous_version: None,
+                        next_version: None,
+                        download_time_ms: None,
+                    })
+                ]
+            );
+        });
+    }
+
+    #[test]
     fn test_metrics_report_update_check_response_time() {
         block_on(async {
             let mut metrics_reporter = MockMetricsReporter::new();
@@ -1954,6 +2001,14 @@
                     Metrics::UpdateCheckResponseTime { response_time: _, successful: false },
                     Metrics::UpdateCheckResponseTime { response_time: _, successful: true },
                     Metrics::RequestsPerCheck { count: 3, successful: true },
+                    Metrics::OmahaEventLost(Event {
+                        event_type: EventType::UpdateComplete,
+                        event_result: EventResult::Error,
+                        errorcode: Some(EventErrorCode::ParseResponse),
+                        previous_version: None,
+                        next_version: None,
+                        download_time_ms: None
+                    }),
                 ]
             );
         });
@@ -2534,7 +2589,10 @@
                 [
                     Metrics::UpdateCheckResponseTime { response_time: _, successful: true },
                     Metrics::RequestsPerCheck { count: 1, successful: true },
+                    Metrics::OmahaEventLost(Event { event_type: EventType::UpdateDownloadStarted, event_result: EventResult::Success, .. }),
                     Metrics::SuccessfulUpdateDuration(install_duration),
+                    Metrics::OmahaEventLost(Event { event_type: EventType::UpdateDownloadFinished, event_result: EventResult::Success, .. }),
+                    Metrics::OmahaEventLost(Event { event_type: EventType::UpdateComplete, event_result: EventResult::Success, .. }),
                     Metrics::SuccessfulUpdateFromFirstSeen(duration_since_first_seen),
                     Metrics::AttemptsToSuccessfulCheck(1),
                     Metrics::AttemptsToSuccessfulInstall { count: 1, successful: true },
@@ -2707,7 +2765,10 @@
                 [
                     Metrics::UpdateCheckResponseTime { response_time: _, successful: true },
                     Metrics::RequestsPerCheck { count: 1, successful: true },
+                    Metrics::OmahaEventLost(Event { event_type: EventType::UpdateDownloadStarted, event_result: EventResult::Success, .. }),
                     Metrics::SuccessfulUpdateDuration(_),
+                    Metrics::OmahaEventLost(Event { event_type: EventType::UpdateDownloadFinished, event_result: EventResult::Success, .. }),
+                    Metrics::OmahaEventLost(Event { event_type: EventType::UpdateComplete, event_result: EventResult::Success, .. }),
                     Metrics::SuccessfulUpdateFromFirstSeen(_),
                     Metrics::AttemptsToSuccessfulCheck(1),
                     Metrics::AttemptsToSuccessfulInstall { count: 1, successful: true },
@@ -2775,6 +2836,7 @@
                     Metrics::UpdateCheckResponseTime { response_time: _, successful: true },
                     Metrics::RequestsPerCheck { count: 1, successful: true },
                     Metrics::SuccessfulUpdateDuration(_),
+                    Metrics::OmahaEventLost(Event { .. }),
                     Metrics::SuccessfulUpdateFromFirstSeen(_),
                     Metrics::AttemptsToSuccessfulCheck(1),
                     Metrics::AttemptsToSuccessfulInstall { count: 2, successful: true }
diff --git a/src/sys/time/BUILD.gn b/src/sys/time/BUILD.gn
index e626d68..90546d2 100644
--- a/src/sys/time/BUILD.gn
+++ b/src/sys/time/BUILD.gn
@@ -16,9 +16,7 @@
     "httpsdate_time_source:tests",
     "lib/httpdate-hyper:tests",
     "lib/inspect-writable:tests",
-    "lib/network_time:tests",
     "lib/push-source:tests",
-    "network_time_service:tests",
     "timekeeper:tests",
     "timekeeper_integration",
   ]
diff --git a/src/sys/time/lib/network_time/BUILD.gn b/src/sys/time/lib/network_time/BUILD.gn
deleted file mode 100644
index 8f69577..0000000
--- a/src/sys/time/lib/network_time/BUILD.gn
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2017 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/package.gni")
-import("//build/test/test_package.gni")
-import("//build/testing/environments.gni")
-
-group("tests") {
-  testonly = true
-  deps = [ ":network-time-tests" ]
-}
-
-test_package("network-time-tests") {
-  deps = [ ":network_time_unittests" ]
-
-  tests = [
-    {
-      name = "network_time_tests"
-      environments = basic_envs
-    },
-  ]
-}
-
-source_set("lib") {
-  sources = [
-    "roughtime_server.cc",
-    "roughtime_server.h",
-    "time_server_config.cc",
-    "time_server_config.h",
-  ]
-
-  deps = [
-    "//sdk/lib/syslog/cpp",
-    "//src/lib/fxl",
-    "//third_party/boringssl",
-    "//third_party/roughtime:client_lib",
-    "//zircon/public/lib/fit",
-    "//zircon/public/lib/zx",
-
-    # TODO(fxbug.dev/57392): Move it back to //third_party once unification completes.
-    "//zircon/third_party/rapidjson",
-  ]
-}
-
-executable("network_time_unittests") {
-  output_name = "network_time_tests"
-
-  testonly = true
-
-  sources = [
-    "roughtime_server_test.cc",
-    "time_server_config_test.cc",
-  ]
-
-  deps = [
-    ":lib",
-    "//src/lib/files",
-    "//src/lib/fxl",
-    "//src/lib/fxl/test:gtest_main",
-    "//third_party/boringssl",
-    "//third_party/roughtime:client_lib",
-    "//zircon/public/lib/zx",
-  ]
-
-  # TODO(fxbug.dev/58162): delete the below and fix compiler warnings
-  configs += [ "//build/config:Wno-conversion" ]
-}
diff --git a/src/sys/time/lib/network_time/README.md b/src/sys/time/lib/network_time/README.md
deleted file mode 100644
index 0c094a3..0000000
--- a/src/sys/time/lib/network_time/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-Network Time
-============
-
-This library uses the roughtime service to estimate the current time.
diff --git a/src/sys/time/lib/network_time/meta/network_time_tests.cmx b/src/sys/time/lib/network_time/meta/network_time_tests.cmx
deleted file mode 100644
index 647fc57..0000000
--- a/src/sys/time/lib/network_time/meta/network_time_tests.cmx
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-    "facets": {
-        "fuchsia.test": {
-            "injected-services": {
-                "fuchsia.posix.socket.Provider": "fuchsia-pkg://fuchsia.com/netstack#meta/netstack.cmx"
-            }
-        }
-    },
-    "include": [
-        "sdk/lib/diagnostics/syslog/client.shard.cmx"
-    ],
-    "program": {
-        "binary": "test/network_time_tests"
-    },
-    "sandbox": {
-        "features": [
-            "isolated-temp"
-        ],
-        "services": [
-            "fuchsia.posix.socket.Provider"
-        ]
-    }
-}
diff --git a/src/sys/time/lib/network_time/roughtime-servers.json b/src/sys/time/lib/network_time/roughtime-servers.json
deleted file mode 100644
index 0610ed8..0000000
--- a/src/sys/time/lib/network_time/roughtime-servers.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-  "servers": [
-    {
-      "name": "Google",
-      "publicKey": "7ad3da688c5c04c635a14786a70bcf30224cc25455371bf9d4a2bfb64b682534",
-      "addresses": [
-        {
-          "address": "roughtime.sandbox.google.com:2002"
-        }
-      ]
-    }
-  ]
-}
diff --git a/src/sys/time/lib/network_time/roughtime_server.cc b/src/sys/time/lib/network_time/roughtime_server.cc
deleted file mode 100644
index 74f00929..0000000
--- a/src/sys/time/lib/network_time/roughtime_server.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/sys/time/lib/network_time/roughtime_server.h"
-
-#include <client.h>
-#include <errno.h>
-#include <lib/fit/defer.h>
-#include <lib/syslog/cpp/macros.h>
-#include <netdb.h>
-#include <poll.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <zircon/syscalls.h>
-#include <zircon/types.h>
-
-#include <string>
-
-#include <fbl/unique_fd.h>
-#include <openssl/rand.h>
-
-namespace time_server {
-
-bool RoughTimeServer::IsValid() const { return valid_; }
-
-std::pair<Status, std::optional<zx::time_utc>> RoughTimeServer::GetTimeFromServer() const {
-  if (!IsValid()) {
-    FX_LOGS_FIRST_N(ERROR, 1) << "time server not supported: " << address_;
-    return {NOT_SUPPORTED, {}};
-  }
-  // Create Socket
-  const size_t colon_offset = address_.rfind(':');
-  if (colon_offset == std::string::npos) {
-    FX_LOGS_FIRST_N(ERROR, 1) << "no port number in server address: " << address_;
-    return {NOT_SUPPORTED, {}};
-  }
-
-  std::string host(address_.substr(0, colon_offset));
-  const std::string port_str(address_.substr(colon_offset + 1));
-
-  struct addrinfo hints;
-  memset(&hints, 0, sizeof(hints));
-  hints.ai_socktype = SOCK_DGRAM;
-  hints.ai_protocol = IPPROTO_UDP;
-  hints.ai_flags = AI_NUMERICSERV;
-
-  if (!host.empty() && host[0] == '[' && host[host.size() - 1] == ']') {
-    host = host.substr(1, host.size() - 1);
-    hints.ai_family = AF_INET6;
-    hints.ai_flags |= AI_NUMERICHOST;
-  }
-
-  struct addrinfo* addrs;
-  int err = getaddrinfo(host.c_str(), port_str.c_str(), &hints, &addrs);
-  if (err != 0) {
-    FX_LOGS_FIRST_N(WARNING, 1) << "resolving " << address_ << ": " << gai_strerror(err);
-    return {NETWORK_ERROR, {}};
-  }
-  auto ac1 = fit::defer([&]() { freeaddrinfo(addrs); });
-  fbl::unique_fd sock_ufd(socket(addrs->ai_family, addrs->ai_socktype, addrs->ai_protocol));
-  if (!sock_ufd.is_valid()) {
-    FX_LOGS_FIRST_N(WARNING, 1) << "creating UDP socket: " << strerror(errno);
-    return {NETWORK_ERROR, {}};
-  }
-  int sock_fd = sock_ufd.get();
-
-  if (connect(sock_fd, addrs->ai_addr, addrs->ai_addrlen)) {
-    FX_LOGS_FIRST_N(WARNING, 1) << "connecting UDP socket: " << strerror(errno);
-    return {NETWORK_ERROR, {}};
-  }
-
-  char dest_str[INET6_ADDRSTRLEN];
-  err = getnameinfo(addrs->ai_addr, addrs->ai_addrlen, dest_str, sizeof(dest_str), NULL, 0,
-                    NI_NUMERICHOST);
-
-  if (err != 0) {
-    FX_LOGS_FIRST_N(WARNING, 1) << "getnameinfo: " << gai_strerror(err);
-    return {NETWORK_ERROR, {}};
-  }
-
-  FX_VLOGS(1) << "Sending request to " << dest_str << ", port " << port_str;
-
-  uint8_t nonce[roughtime::kNonceLength];
-  RAND_bytes(nonce, sizeof(nonce));
-  const std::string request = roughtime::CreateRequest(nonce);
-
-  int timeout = 3 * 1000;  // in milliseconds
-
-  ssize_t r;
-  do {
-    r = send(sock_fd, request.data(), request.size(), 0);
-  } while (r == -1 && errno == EINTR);
-
-  // clock_get returns ns since start of clock. See
-  // docs/zircon/syscalls/clock_get.md.
-  const zx::time start{zx_clock_get_monotonic()};
-
-  if (r < 0 || static_cast<size_t>(r) != request.size()) {
-    FX_LOGS_FIRST_N(WARNING, 1) << "send on UDP socket" << strerror(errno);
-    return {NETWORK_ERROR, {}};
-  }
-
-  uint8_t recv_buf[roughtime::kMinRequestSize];
-  ssize_t buf_len;
-  pollfd readfd;
-  readfd.fd = sock_fd;
-  readfd.events = POLLIN;
-  fd_set readfds;
-  FD_SET(sock_fd, &readfds);
-  int ret = poll(&readfd, 1, timeout);
-  if (ret < 0) {
-    FX_LOGS_FIRST_N(WARNING, 1) << "poll on UDP socket: " << strerror(errno);
-    return {NETWORK_ERROR, {}};
-  }
-  if (ret == 0) {
-    FX_LOGS_FIRST_N(WARNING, 1) << "timeout while poll";
-    return {NETWORK_ERROR, {}};
-  }
-  if (readfd.revents != POLLIN) {
-    FX_LOGS_FIRST_N(WARNING, 1) << "poll, revents = " << readfd.revents;
-    return {NETWORK_ERROR, {}};
-  }
-  buf_len = recv(sock_fd, recv_buf, sizeof(recv_buf), 0 /* flags */);
-
-  const zx::time end{zx_clock_get_monotonic()};
-  const zx::duration drift = (end - start) / 2;
-
-  if (buf_len == -1) {
-    FX_LOGS_FIRST_N(WARNING, 1) << "recv from UDP socket: " << strerror(errno);
-    return {NETWORK_ERROR, {}};
-  }
-
-  uint32_t radius;
-  std::string error;
-  uint64_t timestamp_us;
-  if (!roughtime::ParseResponse(&timestamp_us, &radius, &error, public_key_, recv_buf, buf_len,
-                                nonce)) {
-    FX_LOGS(WARNING) << "response from " << address_ << " failed verification: " << error;
-    return {BAD_RESPONSE, {}};
-  }
-
-  // zx_time_t is nanoseconds, timestamp_us is microseconds.
-  zx::time_utc timestamp{ZX_USEC(timestamp_us)};
-  return {OK, timestamp - drift};
-}
-
-}  // namespace time_server
diff --git a/src/sys/time/lib/network_time/roughtime_server.h b/src/sys/time/lib/network_time/roughtime_server.h
deleted file mode 100644
index 7171ad5..0000000
--- a/src/sys/time/lib/network_time/roughtime_server.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SRC_SYS_TIME_LIB_NETWORK_TIME_ROUGHTIME_SERVER_H_
-#define SRC_SYS_TIME_LIB_NETWORK_TIME_ROUGHTIME_SERVER_H_
-
-#include <lib/zx/time.h>
-#include <protocol.h>
-#include <stdint.h>
-#include <string.h>
-
-#include <map>
-#include <optional>
-#include <string>
-#include <utility>
-
-namespace time_server {
-
-enum Status {
-  OK,
-  NOT_SUPPORTED,  // Server config is invalid or not supported
-  BAD_RESPONSE,   // Bad response from server, most probably the certificate
-                  // could not be verified
-  NETWORK_ERROR   // Either a timeout while polling or an error in another
-                  // network operation
-};
-
-class RoughTimeServer {
- public:
-  bool IsValid() const;
-  std::pair<Status, std::optional<zx::time_utc>> GetTimeFromServer() const;
-  RoughTimeServer(std::string name, std::string address, uint8_t public_key[], int public_key_len)
-      : name_(std::move(name)), address_(std::move(address)) {
-    if (public_key_len != roughtime::kPublicKeyLength) {
-      valid_ = false;
-      return;
-    }
-    valid_ = true;
-    memcpy(public_key_, public_key, roughtime::kPublicKeyLength);
-  }
-  ~RoughTimeServer() = default;
-
- private:
-  bool valid_ = false;
-  std::string name_;
-  std::string address_;
-  uint8_t public_key_[roughtime::kPublicKeyLength];
-};
-
-}  // namespace time_server
-
-#endif  // SRC_SYS_TIME_LIB_NETWORK_TIME_ROUGHTIME_SERVER_H_
diff --git a/src/sys/time/lib/network_time/roughtime_server_test.cc b/src/sys/time/lib/network_time/roughtime_server_test.cc
deleted file mode 100644
index 6840ef6..0000000
--- a/src/sys/time/lib/network_time/roughtime_server_test.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/sys/time/lib/network_time/roughtime_server.h"
-
-#include <arpa/inet.h>
-#include <netdb.h>
-#include <netinet/in.h>
-#include <poll.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-
-#include <thread>
-
-#include <fbl/unique_fd.h>
-#include <gtest/gtest.h>
-
-#define PORT 3453
-
-namespace time_server {
-
-TEST(RoughTimeServerTest, TestValid) {
-  uint8_t key[roughtime::kPublicKeyLength] = {0};
-  RoughTimeServer server1("name", "address:3424", key, roughtime::kPublicKeyLength + 1);
-  EXPECT_EQ(server1.IsValid(), false);
-
-  RoughTimeServer server2("name", "address:3424", key, roughtime::kPublicKeyLength);
-  EXPECT_EQ(server2.IsValid(), true);
-}
-
-#define BUFSIZE 1024
-void listen(int sock) {
-  struct sockaddr_in client;
-  int attempts = 3;
-  int ret;
-  do {
-    attempts--;
-    int timeout = 3 * 1000;
-    pollfd readfd;
-    readfd.fd = sock;
-    readfd.events = POLLIN;
-    ret = poll(&readfd, 1, timeout);
-  } while (attempts > 0 && ret != 1);
-  ASSERT_NE(ret, 0) << "poll timeout";
-  ASSERT_EQ(ret, 1) << "poll: " << strerror(errno);
-  char buf[BUFSIZE];
-  socklen_t len = sizeof(client);
-  int n = recvfrom(sock, buf, BUFSIZE, 0, (struct sockaddr*)&client, &len);
-  ASSERT_GE(n, 0) << "recvfrom: " << strerror(errno);
-  struct hostent* host =
-      gethostbyaddr((const char*)&client.sin_addr.s_addr, sizeof(client.sin_addr.s_addr), AF_INET);
-  ASSERT_NE(host, nullptr) << "gethostbyaddr: " << strerror(errno);
-  char* hostaddr = inet_ntoa(client.sin_addr);
-  ASSERT_NE(hostaddr, nullptr) << "inet_ntoa: " << strerror(errno);
-  n = sendto(sock, buf, strlen(buf), 0, (struct sockaddr*)&client, len);
-  ASSERT_GE(n, 0) << "sendto: " << strerror(errno);
-}
-
-// Checks that the server receives a request from network_time
-TEST(RoughTimeServerTest, TestServerRequest) {
-  uint8_t key[roughtime::kPublicKeyLength] = {0};
-  RoughTimeServer server("name", "127.0.0.1:" + std::to_string(PORT), key,
-                         roughtime::kPublicKeyLength);
-  EXPECT_EQ(server.IsValid(), true);
-
-  // Start server
-  struct sockaddr_in serveraddr;
-
-  memset(&serveraddr, 0, sizeof(serveraddr));
-  fbl::unique_fd sock_ufd(socket(AF_INET, SOCK_DGRAM, 0));
-  ASSERT_TRUE(sock_ufd.is_valid()) << "udp server: socket call" << strerror(errno);
-  int sock = sock_ufd.get();
-
-  serveraddr.sin_family = AF_INET;
-  serveraddr.sin_addr.s_addr = INADDR_ANY;
-  serveraddr.sin_port = htons(PORT);
-
-  ASSERT_EQ(bind(sock, (struct sockaddr*)&serveraddr, sizeof(serveraddr)), 0)
-      << "binding udp: " << strerror(errno);
-  std::thread t1(listen, sock);
-
-  std::pair<time_server::Status, std::optional<zx::time_utc>> ret;
-  int attempts = 3;
-  do {
-    attempts--;
-    ret = server.GetTimeFromServer();
-  } while (attempts > 0 && ret.first == time_server::Status::NETWORK_ERROR);
-  t1.join();
-}
-
-}  // namespace time_server
diff --git a/src/sys/time/lib/network_time/test/BUILD.gn b/src/sys/time/lib/network_time/test/BUILD.gn
deleted file mode 100644
index f931601..0000000
--- a/src/sys/time/lib/network_time/test/BUILD.gn
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2018 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/test/test_package.gni")
-import("//build/testing/environments.gni")
-
-source_set("roughtime_test_lib") {
-  testonly = true
-  sources = [
-    "common.h",
-    "local_roughtime_server.cc",
-    "local_roughtime_server.h",
-    "settable_time_source.cc",
-    "settable_time_source.h",
-  ]
-
-  deps = [
-    "//sdk/fidl/fuchsia.io",
-    "//sdk/lib/sys/cpp",
-    "//sdk/lib/sys/cpp/testing:integration",
-    "//sdk/lib/syslog/cpp",
-    "//sdk/lib/vfs/cpp",
-    "//src/lib/fsl",
-    "//src/lib/fxl",
-    "//src/lib/fxl/test:gtest_main",
-    "//src/sys/time/lib/network_time:lib",
-    "//third_party/boringssl",
-    "//third_party/googletest:gmock",
-    "//third_party/roughtime:client_lib",
-    "//third_party/roughtime:simple_server_lib",
-  ]
-}
diff --git a/src/sys/time/lib/network_time/test/common.h b/src/sys/time/lib/network_time/test/common.h
deleted file mode 100644
index bcec416..0000000
--- a/src/sys/time/lib/network_time/test/common.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SRC_SYS_TIME_LIB_NETWORK_TIME_TEST_COMMON_H_
-#define SRC_SYS_TIME_LIB_NETWORK_TIME_TEST_COMMON_H_
-
-#include "src/lib/fxl/strings/string_printf.h"
-#include "third_party/roughtime/protocol.h"
-
-namespace time_server {
-
-#define NETWORK_TIME_TEST_PUBLIC_KEY                                                              \
-  0x3b, 0x6a, 0x27, 0xbc, 0xce, 0xb6, 0xa4, 0x2d, 0x62, 0xa3, 0xa8, 0xd0, 0x2a, 0x6f, 0x0d, 0x73, \
-      0x65, 0x32, 0x15, 0x77, 0x1d, 0xe2, 0x43, 0xa6, 0x3a, 0xc0, 0x48, 0xa1, 0x8b, 0x59, 0xda,   \
-      0x29
-
-// Ed25519 private key used by a test roughtime server. The
-// private part consists of all zeros and so is only for use in this example.
-constexpr uint8_t kTestPrivateKey[roughtime::kPrivateKeyLength] = {
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, NETWORK_TIME_TEST_PUBLIC_KEY};
-
-// Same as the second half of the private key, but there's no sane way to avoid
-// duplicating this code without macros.
-constexpr uint8_t kTestPublicKey[roughtime::kPublicKeyLength] = {NETWORK_TIME_TEST_PUBLIC_KEY};
-
-#undef NETWORK_TIME_TEST_PUBLIC_KEY
-
-constexpr uint8_t kWrongPrivateKey[roughtime::kPrivateKeyLength] = {};
-
-// Copied from zircon/lib/fidl/array_to_string
-std::string to_hex_string(const uint8_t* data, size_t size) {
-  constexpr char kHexadecimalCharacters[] = "0123456789abcdef";
-  std::string ret;
-  ret.reserve(size * 2);
-  for (size_t i = 0; i < size; i++) {
-    unsigned char c = data[i];
-    ret.push_back(kHexadecimalCharacters[c >> 4]);
-    ret.push_back(kHexadecimalCharacters[c & 0xf]);
-  }
-  return ret;
-}
-
-// Creates a client config for a roughtime server listening on [::1]:port
-std::string local_client_config(uint16_t port) {
-  // Note that the host must explicitly be "::1". "localhost" is
-  // misinterpreted as implying IPv4.
-  return fxl::StringPrintf(
-      R"(
-{
-  "servers":
-  [
-    {
-      "name": "Local",
-      "publicKey": "%s",
-      "addresses":
-        [
-          {
-            "address": "::1:%d"
-          }
-        ]
-    }
-  ]
-})",
-      to_hex_string(kTestPublicKey, roughtime::kPublicKeyLength).c_str(), port);
-}
-
-}  //  namespace time_server
-
-#endif  // SRC_SYS_TIME_LIB_NETWORK_TIME_TEST_COMMON_H_
diff --git a/src/sys/time/lib/network_time/test/local_roughtime_server.cc b/src/sys/time/lib/network_time/test/local_roughtime_server.cc
deleted file mode 100644
index 88f8a69..0000000
--- a/src/sys/time/lib/network_time/test/local_roughtime_server.cc
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2018 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "local_roughtime_server.h"
-
-#include <arpa/inet.h>
-#include <fcntl.h>
-#include <lib/syslog/cpp/macros.h>
-#include <netinet/in.h>
-#include <stdlib.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-
-#include <gtest/gtest.h>
-
-#include "settable_time_source.h"
-#include "third_party/roughtime/protocol.h"
-#include "third_party/roughtime/simple_server.h"
-#include "third_party/roughtime/udp_processor.h"
-
-namespace time_server {
-
-using roughtime::Identity;
-using roughtime::SimpleServer;
-using roughtime::TimeSource;
-
-LocalRoughtimeServer::LocalRoughtimeServer(SettableTimeSource* time_source,
-                                           std::unique_ptr<SimpleServer> simple_server,
-                                           uint16_t port_number)
-    : time_source_(time_source),
-      simple_server_(std::move(simple_server)),
-      port_number_(port_number),
-      is_running_(false) {}
-
-// static
-std::unique_ptr<LocalRoughtimeServer> LocalRoughtimeServer::MakeInstance(
-    const uint8_t private_key[roughtime::kPrivateKeyLength], uint16_t preferred_port_number,
-    roughtime::rough_time_t initial_time_micros) {
-  constexpr roughtime::rough_time_t min_time_micros = 0;
-  constexpr roughtime::rough_time_t max_time_micros = std::numeric_limits<uint64_t>::max();
-  auto identity = SimpleServer::MakeIdentity(private_key, min_time_micros, max_time_micros);
-  auto time_source = std::make_unique<SettableTimeSource>(initial_time_micros);
-  // Capture a raw pointer here because the SimpleServer below takes ownership
-  // of the |time_source| unique_ptr.
-  SettableTimeSource* time_source_ptr = time_source.get();
-
-  int fd;
-  uint16_t actual_port = 0;
-  roughtime::UdpProcessor::MakeSocket(preferred_port_number, &fd, &actual_port);
-  EXPECT_NE(actual_port, 0);
-  FX_LOGS(INFO) << "Starting LocalRoughtimeServer on port " << actual_port;
-
-  std::unique_ptr<SimpleServer> simple_server =
-      std::make_unique<SimpleServer>(std::move(identity), std::move(time_source), fd);
-
-  // Using |new| instead of |make_unique| because the constructor is private.
-  return std::unique_ptr<LocalRoughtimeServer>(
-      new LocalRoughtimeServer(time_source_ptr, std::move(simple_server), actual_port));
-}
-
-void LocalRoughtimeServer::Start() {
-  is_running_ = true;
-  while (IsRunning()) {
-    simple_server_->ProcessBatch();
-  }
-}
-
-void LocalRoughtimeServer::Stop() { is_running_ = false; }
-
-bool LocalRoughtimeServer::IsRunning() { return is_running_; }
-
-void LocalRoughtimeServer::SetTime(roughtime::rough_time_t server_time_micros) {
-  EXPECT_NE(time_source_, nullptr);
-  time_source_->SetTime(server_time_micros);
-}
-
-void LocalRoughtimeServer::SetTime(uint16_t year, uint8_t month, uint8_t day, uint8_t hour,
-                                   uint8_t min, uint8_t sec) {
-  struct tm time = {.tm_sec = sec,
-                    .tm_min = min,
-                    .tm_hour = hour,
-                    .tm_mday = day,
-                    .tm_mon = month - 1,
-                    .tm_year = year - 1900};
-  time_t epoch_seconds = timegm(&time);
-  SetTime(static_cast<roughtime::rough_time_t>(epoch_seconds * 1'000'000));
-}
-
-uint16_t LocalRoughtimeServer::GetPortNumber() const { return port_number_; }
-
-}  // namespace time_server
diff --git a/src/sys/time/lib/network_time/test/local_roughtime_server.h b/src/sys/time/lib/network_time/test/local_roughtime_server.h
deleted file mode 100644
index 8571f33c..0000000
--- a/src/sys/time/lib/network_time/test/local_roughtime_server.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2018 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SRC_SYS_TIME_LIB_NETWORK_TIME_TEST_LOCAL_ROUGHTIME_SERVER_H_
-#define SRC_SYS_TIME_LIB_NETWORK_TIME_TEST_LOCAL_ROUGHTIME_SERVER_H_
-
-#include <thread>
-
-#include "settable_time_source.h"
-#include "third_party/roughtime/server.h"
-#include "third_party/roughtime/simple_server.h"
-
-namespace time_server {
-
-// A wrapper around Roughtime's simple server for hermetic tests. Returns a
-// static time value set at creation time or updated using |SetTime|. Does not
-// automatically increment the time.
-//
-// Construct using |LocalRoughtimeServer::MakeInstance|.
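-//
-// Minimal usage sketch (assumes a test private key such as |kTestPrivateKey|
-// from test/common.h, and that passing 0 as the preferred port asks for any
-// free port):
-//
-//   auto server = LocalRoughtimeServer::MakeInstance(kTestPrivateKey, 0, 0);
-//   std::thread server_thread([&] { server->Start(); });
-//   server->SetTime(2019, 1, 1, 0, 0, 0);
-//   uint16_t port = server->GetPortNumber();
-//   // ... exercise the code under test against [::1]:port ...
-//   server->Stop();
-//   server_thread.join();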
-class LocalRoughtimeServer {
- public:
-  // Factory method.
-  static std::unique_ptr<LocalRoughtimeServer> MakeInstance(
-      const uint8_t private_key[], uint16_t preferred_port_number,
-      roughtime::rough_time_t initial_time_micros);
-
-  // Starts the server. It will run in a loop until |Stop| is called, so it must
-  // be started in a separate thread.
-  void Start();
-
-  // Stops the server.
-  void Stop();
-
-  // Returns true if the server is running.
-  bool IsRunning();
-
-  // Sets the constant time that is returned by the server.
-  void SetTime(roughtime::rough_time_t server_time_micros);
-
-  // Sets the constant time that is returned by the server.
-  //
-  // Params:
-  //   year: four-digit year (e.g. 2019)
-  //   month: 1-12
-  //   day: 1-31
-  //   hour: 0-23
-  //   min: 0-59
-  //   sec: 0-59
-  void SetTime(uint16_t year, uint8_t month, uint8_t day, uint8_t hour, uint8_t min, uint8_t sec);
-
-  // Gets the server port number, which can differ from the port requested in
-  // |MakeInstance| if that port was already taken.
-  uint16_t GetPortNumber() const;
-
- private:
-  // Private because it should only be accessed through a factory method.
-  explicit LocalRoughtimeServer(SettableTimeSource* time_source,
-                                std::unique_ptr<roughtime::SimpleServer> simple_server,
-                                uint16_t port_number);
-
-  // Not owned.
-  SettableTimeSource* const time_source_;
-
-  std::unique_ptr<roughtime::SimpleServer> simple_server_;
-
-  uint16_t port_number_ = 0;
-  std::atomic<bool> is_running_;
-};
-
-}  // namespace time_server
-
-#endif  // SRC_SYS_TIME_LIB_NETWORK_TIME_TEST_LOCAL_ROUGHTIME_SERVER_H_
diff --git a/src/sys/time/lib/network_time/test/settable_time_source.cc b/src/sys/time/lib/network_time/test/settable_time_source.cc
deleted file mode 100644
index 590f4e8..0000000
--- a/src/sys/time/lib/network_time/test/settable_time_source.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2018 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "settable_time_source.h"
-
-#include "third_party/roughtime/protocol.h"
-#include "third_party/roughtime/time_source.h"
-
-namespace time_server {
-
-// Uncertainty radius for current time.
-static constexpr unsigned int kUncertaintyMicros = 5'000'000;
-
-SettableTimeSource::SettableTimeSource() : SettableTimeSource(0) {}
-
-SettableTimeSource::SettableTimeSource(roughtime::rough_time_t initial_time_micros)
-    : now_micros_(initial_time_micros) {}
-SettableTimeSource::SettableTimeSource(SettableTimeSource&& rhs) noexcept = default;
-
-SettableTimeSource& SettableTimeSource::operator=(time_server::SettableTimeSource const& rhs) =
-    default;
-
-SettableTimeSource::~SettableTimeSource() = default;
-
-void SettableTimeSource::SetTime(roughtime::rough_time_t now_micros) { now_micros_ = now_micros; }
-
-std::pair<roughtime::rough_time_t, uint32_t> SettableTimeSource::Now() {
-  return std::make_pair(now_micros_, kUncertaintyMicros);
-}
-
-}  // namespace time_server
diff --git a/src/sys/time/lib/network_time/test/settable_time_source.h b/src/sys/time/lib/network_time/test/settable_time_source.h
deleted file mode 100644
index 92dba1a..0000000
--- a/src/sys/time/lib/network_time/test/settable_time_source.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SRC_SYS_TIME_LIB_NETWORK_TIME_TEST_SETTABLE_TIME_SOURCE_H_
-#define SRC_SYS_TIME_LIB_NETWORK_TIME_TEST_SETTABLE_TIME_SOURCE_H_
-
-#include "third_party/roughtime/protocol.h"
-#include "third_party/roughtime/time_source.h"
-
-namespace time_server {
-
-// A |TimeSource| implementation whose current time can be set using |SetTime|.
-// (Note: Time does not advance automatically.)
-//
-// This class is used to provide the time for a local Roughtime server.
-class SettableTimeSource : public roughtime::TimeSource {
- public:
-  SettableTimeSource();
-  explicit SettableTimeSource(roughtime::rough_time_t initial_time_micros);
-
-  SettableTimeSource(SettableTimeSource&& rhs) noexcept;
-  SettableTimeSource& operator=(SettableTimeSource const& rhs);
-
-  ~SettableTimeSource() override;
-
-  // Set the current time
-  void SetTime(roughtime::rough_time_t now_micros);
-  std::pair<roughtime::rough_time_t, uint32_t> Now() override;
-
- private:
-  // Current time in epoch microseconds.
-  // TODO(kpozin): Use std::atomic? Would require custom copy & move ctors.
-  roughtime::rough_time_t now_micros_ = 0;
-};
-
-}  // namespace time_server
-
-#endif  // SRC_SYS_TIME_LIB_NETWORK_TIME_TEST_SETTABLE_TIME_SOURCE_H_
diff --git a/src/sys/time/lib/network_time/time_server_config.cc b/src/sys/time/lib/network_time/time_server_config.cc
deleted file mode 100644
index f234f98..0000000
--- a/src/sys/time/lib/network_time/time_server_config.cc
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/sys/time/lib/network_time/time_server_config.h"
-
-#include <lib/syslog/cpp/macros.h>
-#include <protocol.h>
-
-#include <fstream>
-#include <sstream>
-#include <string>
-#include <vector>
-
-#include <rapidjson/document.h>
-#include <rapidjson/error/en.h>
-#include <rapidjson/schema.h>
-#include <rapidjson/stringbuffer.h>
-
-#define MULTILINE(...) #__VA_ARGS__
-
-namespace time_server {
-
-const char* config_schema = MULTILINE({
-  "$schema" : "http://json-schema.org/draft-04/schema#",
-  "properties" : {
-    "servers" : {
-      "items" : {
-        "properties" : {
-          "addresses" : {
-            "items" : {
-              "properties" : {"address" : {"type" : "string"}},
-              "required" : ["address"],
-              "type" : "object"
-            },
-            "minItems" : 1,
-            "type" : "array"
-          },
-          "name" : {"type" : "string"},
-          "publicKey" : {"maxLength" : 64, "minLength" : 64, "type" : "string"}
-        },
-        "required" : [ "publicKey", "addresses", "name" ],
-        "type" : "object"
-      },
-      "minItems" : 1,
-      "type" : "array"
-    }
-  },
-  "required" : ["servers"],
-  "type" : "object"
-});
-
-static bool readFile(std::string* out_contents, const char* filename) {
-  std::ifstream serverFile;
-  serverFile.open(filename);
-  if ((serverFile.rdstate() & std::ifstream::failbit) != 0) {
-    FX_LOGS(ERROR) << "Opening " << filename << ": " << strerror(errno);
-    return false;
-  }
-  std::stringstream strStream;
-  strStream << serverFile.rdbuf();
-  out_contents->assign(strStream.str());
-  return true;
-}
-
-std::vector<RoughTimeServer> TimeServerConfig::ServerList() { return server_list_; }
-
-bool checkSchema(rapidjson::Document& d) {
-  rapidjson::Document sd;
-  if (sd.Parse(config_schema).HasParseError()) {
-    FX_LOGS(WARNING) << "Schema not valid";
-    return false;
-  }
-  rapidjson::SchemaDocument schema(sd);
-  rapidjson::SchemaValidator validator(schema);
-  if (!d.Accept(validator)) {
-    // Input JSON is invalid according to the schema
-    // Output diagnostic information
-    rapidjson::StringBuffer sb;
-    validator.GetInvalidSchemaPointer().StringifyUriFragment(sb);
-    FX_LOGS(WARNING) << "Invalid schema: " << sb.GetString();
-    FX_LOGS(WARNING) << "Invalid keyword: " << validator.GetInvalidSchemaKeyword();
-    sb.Clear();
-    validator.GetInvalidDocumentPointer().StringifyUriFragment(sb);
-    FX_LOGS(WARNING) << "Invalid document: " << sb.GetString();
-    return false;
-  }
-  return true;
-}
-
-bool TimeServerConfig::Parse(std::string config_file) {
-  std::string json;
-  if (!readFile(&json, config_file.c_str())) {
-    return false;
-  }
-
-  rapidjson::Document doc;
-  rapidjson::ParseResult ok = doc.Parse(json.c_str());
-  if (!ok) {
-    FX_LOGS(WARNING) << "JSON parse error: " << rapidjson::GetParseError_En(ok.Code()) << "("
-                     << ok.Offset() << ")";
-    return false;
-  }
-  if (!checkSchema(doc)) {
-    return false;
-  }
-
-  const rapidjson::Value& servers = doc["servers"];
-  for (rapidjson::SizeType i = 0; i < servers.Size(); i++) {
-    const rapidjson::Value& server = servers[i];
-
-    const rapidjson::Value& addresses = server["addresses"];
-    std::string name = server["name"].GetString();
-    std::string public_key_str = server["publicKey"].GetString();
-    for (rapidjson::SizeType j = 0; j < addresses.Size(); j++) {
-      const rapidjson::Value& address = addresses[j];
-
-      std::string address_str = address["address"].GetString();
-      uint8_t public_key[roughtime::kPublicKeyLength];
-      if (public_key_str.length() != roughtime::kPublicKeyLength * 2) {
-        FX_LOGS(WARNING) << "Invalid public key: " << public_key_str;
-        return false;
-      }
-      for (unsigned int k = 0; k < roughtime::kPublicKeyLength; k++) {
-        char hex[3] = {0};
-        hex[0] = public_key_str.at(k * 2);
-        hex[1] = public_key_str.at(k * 2 + 1);
-        public_key[k] = (uint8_t)strtoul(hex, NULL, 16);
-      }
-
-      RoughTimeServer server(name, std::move(address_str), public_key, roughtime::kPublicKeyLength);
-      if (server.IsValid()) {
-        server_list_.push_back(server);
-      } else {
-        FX_LOGS(ERROR) << "Roughtime configuration contained invalid server " << name;
-      }
-    }
-  }
-  return server_list_.size() > 0;
-}
-
-}  // namespace time_server
diff --git a/src/sys/time/lib/network_time/time_server_config.h b/src/sys/time/lib/network_time/time_server_config.h
deleted file mode 100644
index a7b4202..0000000
--- a/src/sys/time/lib/network_time/time_server_config.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SRC_SYS_TIME_LIB_NETWORK_TIME_TIME_SERVER_CONFIG_H_
-#define SRC_SYS_TIME_LIB_NETWORK_TIME_TIME_SERVER_CONFIG_H_
-
-#include <string>
-#include <vector>
-
-#include "src/sys/time/lib/network_time/roughtime_server.h"
-
-namespace time_server {
-
-class TimeServerConfig {
- public:
-  bool Parse(std::string server_config_file);
-  std::vector<RoughTimeServer> ServerList();
-
- private:
-  std::vector<RoughTimeServer> server_list_;
-};
-
-}  // namespace time_server
-
-#endif  // SRC_SYS_TIME_LIB_NETWORK_TIME_TIME_SERVER_CONFIG_H_
diff --git a/src/sys/time/lib/network_time/time_server_config_test.cc b/src/sys/time/lib/network_time/time_server_config_test.cc
deleted file mode 100644
index b81586c..0000000
--- a/src/sys/time/lib/network_time/time_server_config_test.cc
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/sys/time/lib/network_time/time_server_config.h"
-
-#include <gtest/gtest.h>
-
-#include "src/lib/files/scoped_temp_dir.h"
-
-#define INVALID_CONFIGS 4
-
-namespace time_server {
-
-using files::ScopedTempDir;
-
-const std::string invalid_configs[INVALID_CONFIGS] = {
-    R"({
-      "servers" : [ {
-        "name" : "Google",
-        "publicKey" :
-            "3b6a27bcceb6a42d62a3a8d02a6f0d736343215771de243a63ac048a18b59da2"
-            "addresses" : [ {"address" : "address:7898"} ]
-      } ]
-    })",
-    R"({
-      "servers" : [ {
-        "name" : "Google",
-        "publicKey" : "3b6a27bcceb6a42d62a3a8d02a6f0d736434315771de243a63ac048a"
-                      "18b59da29"
-      } ]
-    })",
-    R"({
-      "servers" : [ {
-        "name" : "Google",
-        "publicKey" : "3b6a27bcceb6a42d62a3a8d02a6f0d7365433577",
-        "addresses" : [ {"address" : "address:7898"} ]
-      } ]
-    })",
-    "{}"};
-
-TEST(TimeServerConfigTest, HandlesInvalidInput) {
-  ScopedTempDir tmp_dir;
-  for (auto& invalid_config : invalid_configs) {
-    std::string config_path;
-    tmp_dir.NewTempFileWithData(invalid_config, &config_path);
-    TimeServerConfig config;
-    ASSERT_EQ(config.Parse(config_path), false);
-  }
-}
-
-TEST(TimeServerConfigTest, HandlesValidInput) {
-  const std::string json = R"({
-    "servers" : [ {
-      "name" : "Google",
-      "publicKey" :
-          "3b6a27bcceb6a42d62a3a8d02a6f0d736343215771de243a63ac048a18b59da2",
-      "addresses" : [ {"address" : "address:7898"} ]
-    } ]
-  })";
-  ScopedTempDir tmp_dir;
-  std::string config_path;
-  tmp_dir.NewTempFileWithData(json, &config_path);
-  TimeServerConfig config;
-  ASSERT_EQ(config.Parse(config_path), true);
-  auto server_list = config.ServerList();
-  ASSERT_EQ(server_list.size(), 1u);
-}
-
-TEST(TimeServerConfigTest, HandlesMultipleAddressesInput) {
-  const std::string json = R"({
-    "servers" : [ {
-      "name" : "Google",
-      "publicKey" :
-          "3b6a27bcceb6a42d62a3a8d02a6f0d736343215771de243a63ac048a18b59da2",
-      "addresses" :
-          [ {"address" : "address:7898"}, {"address" : "address2:7898"} ]
-    } ]
-  })";
-  ScopedTempDir tmp_dir;
-  std::string config_path;
-  tmp_dir.NewTempFileWithData(json, &config_path);
-  TimeServerConfig config;
-  ASSERT_EQ(config.Parse(config_path), true);
-  auto server_list = config.ServerList();
-  ASSERT_EQ(server_list.size(), 2u);
-}
-
-TEST(TimeServerConfigTest, HandlesMultipleServerInput) {
-  const std::string json = R"({
-    "servers" : [
-      {
-        "name" : "Google",
-        "publicKey" :
-            "3b6a27bcceb6a42d62a3a8d02a6f0d736343215771de243a63ac048a18b59da2",
-        "addresses" : [ {"address" : "address:7898"} ]
-      },
-      {
-        "name" : "Google2",
-        "publicKey" :
-            "3b6a27bcceb6a42d62a3a8d02a6f0d736343215771de243a63ac048a18b59da2",
-        "addresses" : [ {"address" : "address:7898"} ]
-      }
-    ]
-  })";
-  ScopedTempDir tmp_dir;
-  std::string config_path;
-  tmp_dir.NewTempFileWithData(json, &config_path);
-  TimeServerConfig config;
-  ASSERT_EQ(config.Parse(config_path), true);
-  auto server_list = config.ServerList();
-  ASSERT_EQ(server_list.size(), 2u);
-}
-
-TEST(TimeServerConfigTest, HandlesMultipleServerNAddressesInput) {
-  const std::string json = R"({
-    "servers" : [
-      {
-        "name" : "Google",
-        "publicKey" :
-            "3b6a27bcceb6a42d62a3a8d02a6f0d736343215771de243a63ac048a18b59da2",
-        "addresses" :
-            [ {"address" : "address:7898"}, {"address" : "address2:7898"} ]
-      },
-      {
-        "name" : "Google2",
-        "publicKey" :
-            "3b6a27bcceb6a42d62a3a8d02a6f0d736343215771de243a63ac048a18b59da2",
-        "addresses" : [ {"address" : "address:7898"} ]
-      }
-    ]
-  })";
-  ScopedTempDir tmp_dir;
-  std::string config_path;
-  tmp_dir.NewTempFileWithData(json, &config_path);
-  TimeServerConfig config;
-  ASSERT_EQ(config.Parse(config_path), true);
-  auto server_list = config.ServerList();
-  ASSERT_EQ(server_list.size(), 3u);
-}
-
-}  // namespace time_server
diff --git a/src/sys/time/network_time_service/BUILD.gn b/src/sys/time/network_time_service/BUILD.gn
deleted file mode 100644
index 5df5b7c..0000000
--- a/src/sys/time/network_time_service/BUILD.gn
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright 2018 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/package.gni")
-import("//build/test/test_package.gni")
-
-group("tests") {
-  testonly = true
-  deps = [ ":network-time-service-tests" ]
-}
-
-group("network_time_service") {
-  deps = [ ":network-time-service" ]
-}
-
-source_set("lib") {
-  sources = [
-    "inspect.cc",
-    "inspect.h",
-    "service.cc",
-    "service.h",
-    "watcher.h",
-  ]
-
-  public_deps = [
-    "//sdk/fidl/fuchsia.time.external",
-    "//sdk/lib/sys/cpp",
-    "//sdk/lib/sys/inspect/cpp",
-    "//sdk/lib/syslog/cpp",
-    "//src/lib/fxl",
-    "//src/sys/time/lib/network_time:lib",
-    "//third_party/roughtime:client_lib",
-    "//zircon/public/lib/fbl",
-    "//zircon/system/ulib/async-loop:async-loop-cpp",
-    "//zircon/system/ulib/async-loop:async-loop-default",
-  ]
-}
-
-executable("bin") {
-  output_name = "network_time_service"
-
-  sources = [ "main.cc" ]
-
-  deps = [
-    ":lib",
-    "//sdk/lib/sys/cpp",
-    "//src/lib/fsl",
-    "//src/lib/fxl",
-    "//zircon/system/ulib/async-default",
-    "//zircon/system/ulib/async-loop:async-loop-cpp",
-    "//zircon/system/ulib/async-loop:async-loop-default",
-  ]
-}
-
-package("network-time-service") {
-  deps = [ ":bin" ]
-
-  resources = [
-    {
-      path = rebase_path("roughtime-servers.json")
-      dest = "roughtime-servers.json"
-    },
-  ]
-
-  binaries = [
-    {
-      name = "network_time_service"
-    },
-  ]
-  meta = [
-    {
-      path = rebase_path("meta/network_time_service.cmx")
-      dest = "network_time_service.cmx"
-    },
-  ]
-}
-
-test_package("network-time-service-tests") {
-  deps = [ ":network_time_service_unittests" ]
-
-  tests = [
-    {
-      name = "network_time_service_tests"
-      environments = basic_envs
-    },
-  ]
-}
-
-executable("network_time_service_unittests") {
-  output_name = "network_time_service_tests"
-
-  testonly = true
-
-  sources = [
-    "inspect_test.cc",
-    "service_test.cc",
-    "watcher_test.cc",
-  ]
-
-  deps = [
-    ":lib",
-    "//garnet/public/lib/gtest",
-    "//sdk/lib/inspect/testing/cpp",
-    "//sdk/lib/sys/cpp/testing:unit",
-    "//src/lib/files",
-    "//src/lib/fxl",
-    "//src/lib/fxl/test:gtest_main",
-    "//src/sys/time/lib/network_time/test:roughtime_test_lib",
-    "//third_party/boringssl",
-    "//third_party/roughtime:client_lib",
-    "//zircon/public/lib/zx",
-  ]
-}
diff --git a/src/sys/time/network_time_service/inspect.cc b/src/sys/time/network_time_service/inspect.cc
deleted file mode 100644
index 23bc64e..0000000
--- a/src/sys/time/network_time_service/inspect.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/sys/time/network_time_service/inspect.h"
-
-namespace network_time_service {
-
-std::string FailureStatusAsString(time_server::Status status) {
-  switch (status) {
-    case time_server::NOT_SUPPORTED:
-      return "not_supported";
-    case time_server::BAD_RESPONSE:
-      return "bad_response";
-    case time_server::NETWORK_ERROR:
-      return "network";
-    case time_server::OK:
-      return "unknown";
-  }
-}
-
-Inspect::Inspect(inspect::Node root)
-    : root_node_(std::move(root)),
-      success_count_(root_node_.CreateUint("success_count", 0)),
-      failure_node_(root_node_.CreateChild("failure_count")),
-      failure_counts_() {}
-
-void Inspect::Success() { success_count_.Add(1); }
-
-void Inspect::Failure(time_server::Status status) {
-  if (failure_counts_.find(status) == failure_counts_.end()) {
-    failure_counts_.insert(
-        std::make_pair(status, failure_node_.CreateUint(FailureStatusAsString(status), 1)));
-  } else {
-    failure_counts_[status].Add(1);
-  }
-}
-
-}  // namespace network_time_service
diff --git a/src/sys/time/network_time_service/inspect.h b/src/sys/time/network_time_service/inspect.h
deleted file mode 100644
index b6d54fc..0000000
--- a/src/sys/time/network_time_service/inspect.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SRC_SYS_TIME_NETWORK_TIME_SERVICE_INSPECT_H_
-#define SRC_SYS_TIME_NETWORK_TIME_SERVICE_INSPECT_H_
-
-#include <fuchsia/time/external/cpp/fidl.h>
-#include <lib/sys/inspect/cpp/component.h>
-
-#include <unordered_map>
-
-#include "src/sys/time/lib/network_time/time_server_config.h"
-
-namespace network_time_service {
-
-// Obtain a string representation of status suitable for inspect output.
-std::string FailureStatusAsString(time_server::Status status);
-
-// Wrapper around inspect output that tracks successful and failed polls.
-class Inspect {
- public:
-  explicit Inspect(inspect::Node root);
-  // Record a successful poll.
-  void Success();
-  // Record a failed poll.
-  void Failure(time_server::Status status);
-
- private:
-  inspect::Node root_node_;
-  inspect::UintProperty success_count_;
-  inspect::Node failure_node_;
-  std::unordered_map<time_server::Status, inspect::UintProperty> failure_counts_;
-};
-
-}  // namespace network_time_service
-
-#endif  // SRC_SYS_TIME_NETWORK_TIME_SERVICE_INSPECT_H_
diff --git a/src/sys/time/network_time_service/inspect_test.cc b/src/sys/time/network_time_service/inspect_test.cc
deleted file mode 100644
index 55a29c0..0000000
--- a/src/sys/time/network_time_service/inspect_test.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/sys/time/network_time_service/inspect.h"
-
-#include <lib/async/cpp/executor.h>
-#include <lib/gtest/test_loop_fixture.h>
-#include <lib/inspect/testing/cpp/inspect.h>
-
-namespace network_time_service {
-
-class InspectTest : public gtest::TestLoopFixture {
- public:
-  InspectTest() : executor_(dispatcher()) {}
-
- protected:
-  void RunPromiseToCompletion(fit::promise<> promise) {
-    executor_.schedule_task(std::move(promise));
-    RunLoopUntilIdle();
-  }
-
- private:
-  async::Executor executor_;
-};
-
-TEST_F(InspectTest, Success) {
-  inspect::Inspector inspector;
-  Inspect inspect(std::move(inspector.GetRoot()));
-
-  RunPromiseToCompletion(
-      inspect::ReadFromInspector(inspector).then([&](fit::result<inspect::Hierarchy>& hierarchy) {
-        ASSERT_TRUE(hierarchy.is_ok());
-        auto* success_count =
-            hierarchy.value().node().get_property<inspect::UintPropertyValue>("success_count");
-        ASSERT_TRUE(success_count);
-        ASSERT_EQ(0u, success_count->value());
-      }));
-
-  inspect.Success();
-  RunPromiseToCompletion(
-      inspect::ReadFromInspector(inspector).then([&](fit::result<inspect::Hierarchy>& hierarchy) {
-        ASSERT_TRUE(hierarchy.is_ok());
-        auto* success_count =
-            hierarchy.value().node().get_property<inspect::UintPropertyValue>("success_count");
-        ASSERT_TRUE(success_count);
-        ASSERT_EQ(1u, success_count->value());
-      }));
-}
-
-TEST_F(InspectTest, Failure) {
-  inspect::Inspector inspector;
-  Inspect inspect(std::move(inspector.GetRoot()));
-
-  RunPromiseToCompletion(
-      inspect::ReadFromInspector(inspector).then([&](fit::result<inspect::Hierarchy>& hierarchy) {
-        ASSERT_TRUE(hierarchy.is_ok());
-        auto* failure_node = hierarchy.value().GetByPath({"failure_count"});
-        ASSERT_TRUE(failure_node);
-        ASSERT_TRUE(failure_node->children().empty());
-      }));
-
-  inspect.Failure(time_server::BAD_RESPONSE);
-  RunPromiseToCompletion(
-      inspect::ReadFromInspector(inspector).then([&](fit::result<inspect::Hierarchy>& hierarchy) {
-        ASSERT_TRUE(hierarchy.is_ok());
-        auto* failure_node = hierarchy.value().GetByPath({"failure_count"});
-        ASSERT_TRUE(failure_node);
-        ASSERT_EQ(1u, failure_node->node().properties().size());
-        auto* bad_response =
-            failure_node->node().get_property<inspect::UintPropertyValue>("bad_response");
-        ASSERT_TRUE(bad_response);
-        ASSERT_EQ(1u, bad_response->value());
-      }));
-
-  inspect.Failure(time_server::BAD_RESPONSE);
-  inspect.Failure(time_server::NETWORK_ERROR);
-  RunPromiseToCompletion(
-      inspect::ReadFromInspector(inspector).then([&](fit::result<inspect::Hierarchy>& hierarchy) {
-        ASSERT_TRUE(hierarchy.is_ok());
-        auto* failure_node = hierarchy.value().GetByPath({"failure_count"});
-        ASSERT_TRUE(failure_node);
-        ASSERT_EQ(2u, failure_node->node().properties().size());
-        auto* bad_response =
-            failure_node->node().get_property<inspect::UintPropertyValue>("bad_response");
-        ASSERT_TRUE(bad_response);
-        ASSERT_EQ(2u, bad_response->value());
-        auto* network = failure_node->node().get_property<inspect::UintPropertyValue>("network");
-        ASSERT_TRUE(network);
-        ASSERT_EQ(1u, network->value());
-      }));
-}
-
-}  // namespace network_time_service
diff --git a/src/sys/time/network_time_service/main.cc b/src/sys/time/network_time_service/main.cc
deleted file mode 100644
index 040cf50..0000000
--- a/src/sys/time/network_time_service/main.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <lib/async-loop/cpp/loop.h>
-#include <lib/async-loop/default.h>
-#include <lib/async/cpp/task.h>
-#include <lib/sys/cpp/component_context.h>
-#include <lib/sys/inspect/cpp/component.h>
-#include <lib/syslog/cpp/macros.h>
-
-#include "lib/fidl/cpp/binding_set.h"
-#include "lib/fit/function.h"
-#include "src/lib/fxl/command_line.h"
-#include "src/lib/fxl/log_settings_command_line.h"
-#include "src/sys/time/lib/network_time/time_server_config.h"
-#include "src/sys/time/network_time_service/inspect.h"
-#include "src/sys/time/network_time_service/service.h"
-
-constexpr char kServerConfigPath[] = "/pkg/data/roughtime-servers.json";
-
-int main(int argc, char** argv) {
-  auto command_line = fxl::CommandLineFromArgcArgv(argc, argv);
-  if (!fxl::SetLogSettingsFromCommandLine(command_line, {"time", "network_time_service"})) {
-    return 1;
-  }
-
-  const std::string config_path =
-      command_line.GetOptionValueWithDefault("config", kServerConfigPath);
-  FX_LOGS(INFO) << "Opening client config from " << config_path;
-  time_server::TimeServerConfig server_config;
-  if (!server_config.Parse(std::move(config_path))) {
-    FX_LOGS(FATAL) << "Failed to parse client config";
-    return 1;
-  }
-  // Currently this only supports one roughtime server.
-  time_server::RoughTimeServer server = server_config.ServerList()[0];
-
-  async::Loop loop(&kAsyncLoopConfigAttachToCurrentThread);
-  auto context = sys::ComponentContext::CreateAndServeOutgoingDirectory();
-  sys::ComponentInspector inspector(context.get());
-
-  network_time_service::TimeServiceImpl svc(
-      std::move(context), std::move(server), loop.dispatcher(),
-      network_time_service::Inspect(std::move(inspector.root())));
-  loop.Run();
-  return 0;
-}
diff --git a/src/sys/time/network_time_service/meta/network_time_service.cmx b/src/sys/time/network_time_service/meta/network_time_service.cmx
deleted file mode 100644
index 951b17d..0000000
--- a/src/sys/time/network_time_service/meta/network_time_service.cmx
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-    "include": [
-        "sdk/lib/diagnostics/syslog/client.shard.cmx"
-    ],
-    "program": {
-        "binary": "bin/network_time_service"
-    },
-    "sandbox": {
-        "services": [
-            "fuchsia.net.NameLookup",
-            "fuchsia.posix.socket.Provider",
-            "fuchsia.sys.Environment",
-            "fuchsia.sys.Loader"
-        ]
-    }
-}
diff --git a/src/sys/time/network_time_service/meta/network_time_service_tests.cmx b/src/sys/time/network_time_service/meta/network_time_service_tests.cmx
deleted file mode 100644
index f77cc17..0000000
--- a/src/sys/time/network_time_service/meta/network_time_service_tests.cmx
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-    "facets": {
-        "fuchsia.test": {
-            "injected-services": {
-                "fuchsia.posix.socket.Provider": "fuchsia-pkg://fuchsia.com/netstack#meta/netstack.cmx"
-            }
-        }
-    },
-    "include": [
-        "sdk/lib/diagnostics/syslog/client.shard.cmx"
-    ],
-    "program": {
-        "binary": "test/network_time_service_tests"
-    },
-    "sandbox": {
-        "features": [
-            "isolated-temp"
-        ],
-        "services": [
-            "fuchsia.posix.socket.Provider"
-        ]
-    }
-}
diff --git a/src/sys/time/network_time_service/roughtime-servers.json b/src/sys/time/network_time_service/roughtime-servers.json
deleted file mode 100644
index 0610ed8..0000000
--- a/src/sys/time/network_time_service/roughtime-servers.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-  "servers": [
-    {
-      "name": "Google",
-      "publicKey": "7ad3da688c5c04c635a14786a70bcf30224cc25455371bf9d4a2bfb64b682534",
-      "addresses": [
-        {
-          "address": "roughtime.sandbox.google.com:2002"
-        }
-      ]
-    }
-  ]
-}
diff --git a/src/sys/time/network_time_service/service.cc b/src/sys/time/network_time_service/service.cc
deleted file mode 100644
index 711448f..0000000
--- a/src/sys/time/network_time_service/service.cc
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/sys/time/network_time_service/service.h"
-
-#include <lib/async/cpp/time.h>
-#include <lib/syslog/cpp/macros.h>
-#include <zircon/syscalls.h>
-
-#include <fstream>
-
-namespace network_time_service {
-
-TimeServiceImpl::TimeServiceImpl(std::unique_ptr<sys::ComponentContext> context,
-                                 time_server::RoughTimeServer rough_time_server,
-                                 async_dispatcher_t* dispatcher, Inspect inspect,
-                                 RetryConfig retry_config)
-    : context_(std::move(context)),
-      rough_time_server_(std::move(rough_time_server)),
-      inspect_(std::move(inspect)),
-      push_source_binding_(this),
-      status_watcher_(time_external::Status::OK),
-      dispatcher_(dispatcher),
-      consecutive_poll_failures_(0),
-      retry_config_(retry_config) {
-  push_source_binding_.set_error_handler([&](zx_status_t error) {
-    // Clean up client state for the next client.
-    ResetPushSourceClient(error);
-  });
-  fidl::InterfaceRequestHandler<time_external::PushSource> handler =
-      [&](fidl::InterfaceRequest<time_external::PushSource> request) {
-        if (push_source_binding_.is_bound()) {
-          FX_LOGS(WARNING) << "Received multiple connection requests which is unsupported";
-          request.Close(ZX_ERR_ALREADY_BOUND);
-        } else {
-          push_source_binding_.Bind(std::move(request));
-        }
-      };
-  context_->outgoing()->AddPublicService(std::move(handler));
-  // TODO: trigger a check for when network becomes available so we can properly
-  // report the INITIALIZING state rather than starting on OK.
-}
-
-TimeServiceImpl::~TimeServiceImpl() = default;
-
-void TimeServiceImpl::AsyncPollSamples(async_dispatcher_t* dispatcher, async::TaskBase* task,
-                                       zx_status_t zx_status) {
-  zx_time_t before = zx_clock_get_monotonic();
-  auto ret = rough_time_server_.GetTimeFromServer();
-  zx_time_t after = zx_clock_get_monotonic();
-
-  time_external::Status status;
-  if (ret.first == time_server::OK && ret.second) {
-    time_external::TimeSample sample;
-    sample.set_monotonic((before + after) / 2);
-    sample.set_utc(ret.second->get());
-    sample.set_standard_deviation(EstimateStandardDeviation(before, after));
-    sample_watcher_.Update(std::move(sample));
-    dispatcher_last_success_time_.emplace(async::Now(dispatcher));
-    status = time_external::Status::OK;
-    consecutive_poll_failures_ = 0;
-    inspect_.Success();
-  } else {
-    switch (ret.first) {
-      case time_server::OK:
-        status = time_external::Status::UNKNOWN_UNHEALTHY;
-        break;
-      case time_server::BAD_RESPONSE:
-        status = time_external::Status::PROTOCOL;
-        break;
-      case time_server::NETWORK_ERROR:
-        status = time_external::Status::NETWORK;
-        break;
-      case time_server::NOT_SUPPORTED:
-      default:
-        status = time_external::Status::UNKNOWN_UNHEALTHY;
-        break;
-    }
-    zx::time next_poll_time =
-        async::Now(dispatcher_) + retry_config_.WaitAfterFailure(consecutive_poll_failures_);
-    ScheduleAsyncPoll(next_poll_time);
-    consecutive_poll_failures_++;
-    inspect_.Failure(ret.first);
-  }
-
-  status_watcher_.Update(status);
-}
-
-void TimeServiceImpl::ScheduleAsyncPoll(zx::time dispatch_time) {
-  // Try to post a task; ZX_ERR_ALREADY_EXISTS indicates it is already scheduled.
-  zx_status_t post_status = sample_poll_task_.PostForTime(dispatcher_, dispatch_time);
-  if (post_status != ZX_OK && post_status != ZX_ERR_ALREADY_EXISTS) {
-    FX_LOGS(ERROR) << "Failed to post task!";
-  }
-}
-
-void TimeServiceImpl::UpdateDeviceProperties(time_external::Properties properties) {
-  // Time samples are currently taken independently of each other and therefore we don't
-  // need to take properties such as oscillator performance into account.
-}
-
-void TimeServiceImpl::WatchSample(TimeServiceImpl::WatchSampleCallback callback) {
-  if (!sample_watcher_.Watch(std::move(callback))) {
-    // failure to watch indicates we have multiple concurrent WatchSample calls so close the
-    // channel.
-    ResetPushSourceClient(ZX_ERR_BAD_STATE);
-    return;
-  }
-
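-  // Poll immediately if no sample has been retrieved yet; otherwise schedule the next poll
-  // for the configured interval after the last success.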
-  zx::time next_poll_time(0);
-  if (dispatcher_last_success_time_) {
-    next_poll_time =
-        *dispatcher_last_success_time_ + zx::nsec(retry_config_.nanos_between_successes);
-  }
-  ScheduleAsyncPoll(next_poll_time);
-}
-
-void TimeServiceImpl::WatchStatus(TimeServiceImpl::WatchStatusCallback callback) {
-  if (!status_watcher_.Watch(std::move(callback))) {
-    // failure to watch indicates we have multiple concurrent WatchStatus calls so close the
-    // channel.
-    ResetPushSourceClient(ZX_ERR_BAD_STATE);
-    return;
-  }
-}
-
-void TimeServiceImpl::ResetPushSourceClient(zx_status_t epitaph) {
-  push_source_binding_.Close(epitaph);
-  push_source_binding_.Unbind();
-  sample_watcher_.ResetClient();
-  status_watcher_.ResetClient();
-}
-
-zx_time_t EstimateStandardDeviation(zx_time_t mono_before, zx_time_t mono_after) {
-  // Coarsely approximate error based on the round trip time. Here we assume the error
-  // distribution is normal and the midpoint of the rtt is the mean. In a normal distribution
-  // nearly all samples fall within 3*standard_deviation of the mean, so the rtt is
-  // roughly 6*standard_deviation.
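-  // For example, a 60ms round trip yields a standard deviation estimate of 10ms.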
-  // TODO(fxbug.dev/61462) - add in errors from the 'radius' value provided by roughtime.
-  return (mono_after - mono_before) / 6;
-}
-
-}  // namespace network_time_service
diff --git a/src/sys/time/network_time_service/service.h b/src/sys/time/network_time_service/service.h
deleted file mode 100644
index c37718f..0000000
--- a/src/sys/time/network_time_service/service.h
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SRC_SYS_TIME_NETWORK_TIME_SERVICE_SERVICE_H_
-#define SRC_SYS_TIME_NETWORK_TIME_SERVICE_SERVICE_H_
-
-#include <fuchsia/time/external/cpp/fidl.h>
-#include <lib/async/cpp/task.h>
-#include <lib/sys/cpp/component_context.h>
-
-#include <algorithm>
-#include <vector>
-
-#include "lib/fidl/cpp/binding_set.h"
-#include "src/sys/time/lib/network_time/time_server_config.h"
-#include "src/sys/time/network_time_service/inspect.h"
-#include "src/sys/time/network_time_service/watcher.h"
-
-const uint64_t kMinNanosBetweenFailures = 1 * 1'000'000'000u;
-const uint32_t kMaxRetryExponent = 3;
-const uint32_t kTriesPerExponent = 3;
-const uint64_t kNanosBetweenSuccesses = 30 * 60 * ((uint64_t)1'000'000'000u);
-
-namespace time_external = fuchsia::time::external;
-namespace network_time_service {
-
-// Defines how the |TimeServiceImpl| PushSource polls for updates. The retry delay after an
-// error begins at |min_nanos_between_failures| and doubles after every |tries_per_exponent|
-// failures, up to |max_exponent| doublings.
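-// For example, with the defaults (1 second minimum, 3 tries per exponent, max exponent 3),
-// the retry delays are 1s, 1s, 1s, 2s, 2s, 2s, 4s, 4s, 4s, and 8s thereafter.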
-class RetryConfig {
- public:
-  RetryConfig(uint64_t min_nanos_between_failures = kMinNanosBetweenFailures,
-              uint32_t max_exponent = kMaxRetryExponent,
-              uint32_t tries_per_exponent = kTriesPerExponent,
-              uint64_t nanos_between_successes = kNanosBetweenSuccesses)
-      : nanos_between_successes(nanos_between_successes),
-        min_nanos_between_failures_(min_nanos_between_failures),
-        max_exponent_(max_exponent),
-        tries_per_exponent_(tries_per_exponent) {}
-
-  // Returns the duration to wait for the |retry_number|th retry. The first retry
-  // is denoted 0.
-  zx::duration WaitAfterFailure(uint32_t retry_number) {
-    uint32_t exponent = std::min(retry_number / tries_per_exponent_, max_exponent_);
-    return zx::nsec(min_nanos_between_failures_ << exponent);
-  }
-
-  uint64_t nanos_between_successes;
-
- private:
-  uint64_t min_nanos_between_failures_;
-  uint32_t max_exponent_;
-  uint32_t tries_per_exponent_;
-};
-
-// Implementation of the FIDL time services.
-// TODO(fxbug.dev/58068): This currently assumes that there is only a single client. To support
-// multiple clients, this needs to retain per-client state so that it understands when
-// a value hasn't been returned yet to a particular client, and so that it can close
-// channels to only a single client as needed.
-class TimeServiceImpl : public time_external::PushSource {
- public:
-  // Constructs the time service with a caller-owned application context.
-  TimeServiceImpl(std::unique_ptr<sys::ComponentContext> context,
-                  time_server::RoughTimeServer rough_time_server, async_dispatcher_t* dispatcher,
-                  Inspect inspect, RetryConfig retry_config = RetryConfig());
-  ~TimeServiceImpl();
-
-  // |PushSource|:
-  void UpdateDeviceProperties(time_external::Properties properties) override;
-
-  // |PushSource|:
-  void WatchSample(WatchSampleCallback callback) override;
-
-  // |PushSource|:
-  void WatchStatus(WatchStatusCallback callback) override;
-
- private:
-  // Polls for new time samples and posts changes to the time source status.
-  void AsyncPollSamples(async_dispatcher_t* dispatcher, async::TaskBase* task, zx_status_t status);
-
-  // Schedules a sample poll to begin at the specified time in the dispatcher's clock.
-  void ScheduleAsyncPoll(zx::time dispatch_time);
-
-  // Remove the PushSource client with the specified epitaph and reset client state.
-  void ResetPushSourceClient(zx_status_t epitaph);
-
-  std::unique_ptr<sys::ComponentContext> context_;
-  time_server::RoughTimeServer rough_time_server_;
-  Inspect inspect_;
-
-  fidl::Binding<time_external::PushSource> push_source_binding_;
-  Watcher<time_external::Status> status_watcher_;
-  Watcher<time_external::TimeSample> sample_watcher_;
-
-  async_dispatcher_t* dispatcher_;
-  // Time of last successful update. Reported in the dispatcher's clock which may not be monotonic.
-  std::optional<zx::time> dispatcher_last_success_time_;
-  uint32_t consecutive_poll_failures_;
-  async::TaskMethod<TimeServiceImpl, &TimeServiceImpl::AsyncPollSamples> sample_poll_task_{this};
-  RetryConfig retry_config_;
-};
-
-// Estimate the standard deviation based on the monotonic times taken before and after a server was
-// polled.
-zx_time_t EstimateStandardDeviation(zx_time_t mono_before, zx_time_t mono_after);
-
-}  // namespace network_time_service
-
-#endif  // SRC_SYS_TIME_NETWORK_TIME_SERVICE_SERVICE_H_
diff --git a/src/sys/time/network_time_service/service_test.cc b/src/sys/time/network_time_service/service_test.cc
deleted file mode 100644
index 3e569d1..0000000
--- a/src/sys/time/network_time_service/service_test.cc
+++ /dev/null
@@ -1,324 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/sys/time/network_time_service/service.h"
-
-#include <fcntl.h>
-#include <lib/async/cpp/executor.h>
-#include <lib/fdio/directory.h>
-#include <lib/fdio/fd.h>
-#include <lib/fdio/fdio.h>
-#include <lib/gtest/test_loop_fixture.h>
-#include <lib/inspect/testing/cpp/inspect.h>
-#include <lib/sys/cpp/file_descriptor.h>
-#include <lib/sys/cpp/testing/component_context_provider.h>
-#include <lib/sys/inspect/cpp/component.h>
-#include <lib/zx/time.h>
-#include <stdio.h>
-#include <time.h>
-
-#include <thread>
-
-#include "src/lib/files/scoped_temp_dir.h"
-#include "src/sys/time/lib/network_time/test/common.h"
-#include "src/sys/time/lib/network_time/test/local_roughtime_server.h"
-#include "src/sys/time/lib/network_time/time_server_config.h"
-#include "src/sys/time/network_time_service/inspect.h"
-#include "third_party/roughtime/protocol.h"
-
-namespace time_external = fuchsia::time::external;
-namespace network_time_service {
-
-// Although our test Roughtime server doesn't advance time, there is some error introduced
-// in the processing that results in the final reported sample being slightly off.
-const int64_t kTimeSpreadNanos = 100'000'000;
-
-const int64_t kExpectedTimeNanos = 7'000'000'000'000;
-
-const uint64_t kTestNanosAfterPoll = 100;
-
-class PushSourceTest : public gtest::TestLoopFixture {
- public:
-  PushSourceTest() : executor_(dispatcher()) {}
-
- protected:
-  void TearDown() override {
-    TestLoopFixture::TearDown();
-    if (local_roughtime_server_) {
-      local_roughtime_server_->Stop();
-    }
-    time_service_.reset();
-  }
-
-  void RunPromiseToCompletion(fit::promise<> promise) {
-    executor_.schedule_task(std::move(promise));
-    RunLoopUntilIdle();
-  }
-
-  std::shared_ptr<time_server::LocalRoughtimeServer> local_roughtime_server_ = nullptr;
-
-  // Launch a local Roughtime server in a new thread. Returns the port it is running on.
-  uint16_t LaunchLocalRoughtimeServer(const uint8_t* private_key) {
-    std::unique_ptr<time_server::LocalRoughtimeServer> local_roughtime_raw =
-        time_server::LocalRoughtimeServer::MakeInstance(private_key, 0, 1537485257118'000);
-    // The roughtime server thread may outlive the test, so wrap the server in a shared pointer
-    // so both test and server threads may safely access it.
-    local_roughtime_server_.reset(local_roughtime_raw.release());
-    std::shared_ptr<time_server::LocalRoughtimeServer> local_roughtime_server(
-        local_roughtime_server_);
-    auto roughtime_thread = std::make_unique<std::thread>(
-        std::thread([local_roughtime_server]() { local_roughtime_server->Start(); }));
-    roughtime_thread->detach();
-    uint16_t port_number = local_roughtime_server_->GetPortNumber();
-    EXPECT_GT(port_number, 0);
-    return port_number;
-  }
-
-  // Launch a TimeServiceImpl that polls a roughtime server listening on `roughtime_port`.
-  void LaunchService(uint16_t roughtime_port, inspect::Node node) {
-    time_server::TimeServerConfig config = ConfigForLocalServer(roughtime_port);
-    time_server::RoughTimeServer server = config.ServerList()[0];
-    network_time_service::RetryConfig retry_config(kTestNanosAfterPoll, 0, 1, kTestNanosAfterPoll);
-    network_time_service::Inspect inspect(std::move(node));
-    time_service_.reset(new TimeServiceImpl(provider_.TakeContext(), server, dispatcher(),
-                                            std::move(inspect), retry_config));
-  }
-
-  // Connect to the PushSource protocol of a TimeServiceImpl. Launches it if not already launched.
-  time_external::PushSourcePtr ConnectToService() {
-    time_external::PushSourcePtr push_source;
-    provider_.ConnectToPublicService(push_source.NewRequest());
-    return push_source;
-  }
-
- private:
-  time_server::TimeServerConfig ConfigForLocalServer(uint16_t port_number) {
-    std::string config_json = time_server::local_client_config(port_number);
-    std::string client_config_path;
-    files::ScopedTempDir temp_dir;
-    temp_dir.NewTempFileWithData(config_json, &client_config_path);
-    time_server::TimeServerConfig config;
-    config.Parse(client_config_path);
-    return config;
-  }
-
-  std::unique_ptr<TimeServiceImpl> time_service_;
-  sys::testing::ComponentContextProvider provider_;
-  async::Executor executor_;
-};
-
-TEST_F(PushSourceTest, PushSourceRejectsMultipleClients) {
-  uint16_t roughtime_port = LaunchLocalRoughtimeServer(time_server::kTestPrivateKey);
-  local_roughtime_server_->SetTime(kExpectedTimeNanos / 1000);
-  LaunchService(roughtime_port, inspect::Node());
-  auto first_client = ConnectToService();
-
-  // Currently the PushSource implementation accepts only one client at a time - see fxbug.dev/58068
-  auto second_client = ConnectToService();
-  bool error_handler_called = false;
-  second_client.set_error_handler([&](zx_status_t status) {
-    EXPECT_EQ(status, ZX_ERR_ALREADY_BOUND);
-    error_handler_called = true;
-  });
-  second_client->WatchSample([&](time_external::TimeSample sample) { FAIL(); });
-  RunLoopUntilIdle();
-  EXPECT_TRUE(error_handler_called);
-
-  // original client should still be usable.
-  bool call_completed = false;
-  first_client->WatchSample([&](time_external::TimeSample sample) { call_completed = true; });
-  RunLoopUntilIdle();
-  EXPECT_TRUE(call_completed);
-}
-
-TEST_F(PushSourceTest, PushSourceStateResetOnDisconnect) {
-  uint16_t roughtime_port = LaunchLocalRoughtimeServer(time_server::kTestPrivateKey);
-  local_roughtime_server_->SetTime(kExpectedTimeNanos / 1000);
-  LaunchService(roughtime_port, inspect::Node());
-  time_external::TimeSample original_sample;
-
-  auto first_client = ConnectToService();
-  bool first_call_complete = false;
-  first_client->WatchSample([&](time_external::TimeSample sample) {
-    original_sample = std::move(sample);
-    first_call_complete = true;
-  });
-  RunLoopUntilIdle();
-  EXPECT_TRUE(first_call_complete);
-  first_client.Unbind();
-  RunLoopUntilIdle();
-
-  // last sent state for previous client should not be retained, so call should
-  // return immediately with the same result.
-  bool second_call_complete = false;
-  auto second_client = ConnectToService();
-  second_client->WatchSample([&](time_external::TimeSample sample) {
-    EXPECT_EQ(original_sample.monotonic(), sample.monotonic());
-    EXPECT_EQ(original_sample.utc(), sample.utc());
-    second_call_complete = true;
-  });
-  RunLoopUntilIdle();
-  EXPECT_TRUE(second_call_complete);
-}
-
-TEST_F(PushSourceTest, WatchSample) {
-  uint16_t roughtime_port = LaunchLocalRoughtimeServer(time_server::kTestPrivateKey);
-  inspect::Inspector inspector;
-  LaunchService(roughtime_port, std::move(inspector.GetRoot()));
-  auto proxy = ConnectToService();
-  local_roughtime_server_->SetTime(kExpectedTimeNanos / 1000);
-
-  bool first_call_complete = false;
-  zx_time_t mono_before = zx_clock_get_monotonic();
-  proxy->WatchSample([&](time_external::TimeSample sample) {
-    zx_time_t mono_after = zx_clock_get_monotonic();
-    EXPECT_GT(sample.utc(), kExpectedTimeNanos - kTimeSpreadNanos);
-    EXPECT_LT(sample.utc(), kExpectedTimeNanos + kTimeSpreadNanos);
-    EXPECT_GT(sample.monotonic(), mono_before);
-    EXPECT_LT(sample.monotonic(), mono_after);
-    EXPECT_GT(sample.standard_deviation(), 0);
-    EXPECT_LT(sample.standard_deviation(), EstimateStandardDeviation(mono_before, mono_after));
-    first_call_complete = true;
-  });
-  RunLoopUntilIdle();
-  EXPECT_TRUE(first_call_complete);
-  bool now = false;
-  RunPromiseToCompletion(
-      inspect::ReadFromInspector(inspector).then([&](fit::result<inspect::Hierarchy>& hierarchy) {
-        ASSERT_TRUE(hierarchy.is_ok());
-        auto* success_count =
-            hierarchy.value().node().get_property<inspect::UintPropertyValue>("success_count");
-        ASSERT_TRUE(success_count);
-        ASSERT_EQ(1u, success_count->value());
-        now = true;
-      }));
-  ASSERT_TRUE(now);
-
-  bool second_call_complete = false;
-  mono_before = zx_clock_get_monotonic();
-  proxy->WatchSample([&](time_external::TimeSample sample) {
-    zx_time_t mono_after = zx_clock_get_monotonic();
-    EXPECT_GT(sample.utc(), kExpectedTimeNanos - kTimeSpreadNanos);
-    EXPECT_LT(sample.utc(), kExpectedTimeNanos + kTimeSpreadNanos);
-    EXPECT_GT(sample.monotonic(), mono_before);
-    EXPECT_LT(sample.monotonic(), mono_after);
-    EXPECT_GT(sample.standard_deviation(), 0);
-    EXPECT_LT(sample.standard_deviation(), EstimateStandardDeviation(mono_before, mono_after));
-    second_call_complete = true;
-  });
-  // Next poll should only complete after the configured wait period.
-  RunLoopFor(zx::nsec(kTestNanosAfterPoll / 2));
-  EXPECT_FALSE(second_call_complete);
-  RunLoopFor(zx::nsec(kTestNanosAfterPoll));
-  EXPECT_TRUE(second_call_complete);
-  RunPromiseToCompletion(
-      inspect::ReadFromInspector(inspector).then([&](fit::result<inspect::Hierarchy>& hierarchy) {
-        ASSERT_TRUE(hierarchy.is_ok());
-        auto* success_count =
-            hierarchy.value().node().get_property<inspect::UintPropertyValue>("success_count");
-        ASSERT_TRUE(success_count);
-        ASSERT_EQ(2u, success_count->value());
-      }));
-}
-
-TEST_F(PushSourceTest, WatchStatus) {
-  uint16_t roughtime_port = LaunchLocalRoughtimeServer(time_server::kTestPrivateKey);
-  LaunchService(roughtime_port, inspect::Node());
-  auto proxy = ConnectToService();
-  bool call_complete = false;
-  proxy->WatchStatus([&](time_external::Status status) {
-    EXPECT_EQ(status, time_external::Status::OK);
-    call_complete = true;
-  });
-  RunLoopUntilIdle();
-
-  // Second call should not complete since status should not change.
-  // A call to WatchSample made afterwards should complete while the status watch is active.
-  proxy->WatchStatus([&](time_external::Status status) { FAIL(); });
-  bool sample_call_complete = false;
-  proxy->WatchSample([&](time_external::TimeSample sample) { sample_call_complete = true; });
-  RunLoopUntilIdle();
-  EXPECT_TRUE(sample_call_complete);
-}
-
-TEST_F(PushSourceTest, WatchStatusUnhealthy) {
-  // Connect to a roughtime server signing with a bad key to simulate unhealthy.
-  uint16_t roughtime_port = LaunchLocalRoughtimeServer(time_server::kWrongPrivateKey);
-  inspect::Inspector inspector;
-  LaunchService(roughtime_port, std::move(inspector.GetRoot()));
-  auto proxy = ConnectToService();
-  // First call always indicates OK
-  proxy->WatchStatus(
-      [&](time_external::Status status) { EXPECT_EQ(status, time_external::Status::OK); });
-  RunLoopUntilIdle();
-
-  // Obtaining a sample should fail and result in reporting an unhealthy status.
-  proxy->WatchSample([&](time_external::TimeSample sample) { FAIL(); });
-  bool second_call_complete = false;
-  proxy->WatchStatus([&](time_external::Status status) {
-    EXPECT_EQ(status, time_external::Status::PROTOCOL);
-    second_call_complete = true;
-  });
-  RunLoopUntilIdle();
-  EXPECT_TRUE(second_call_complete);
-  RunPromiseToCompletion(
-      inspect::ReadFromInspector(inspector).then([&](fit::result<inspect::Hierarchy>& hierarchy) {
-        ASSERT_TRUE(hierarchy.is_ok());
-        auto* failure_node = hierarchy.value().GetByPath({"failure_count"});
-        auto* bad_response =
-            failure_node->node().get_property<inspect::UintPropertyValue>("bad_response");
-        ASSERT_TRUE(bad_response);
-        ASSERT_EQ(1u, bad_response->value());
-      }));
-}
-
-TEST_F(PushSourceTest, ChannelClosedOnConcurrentWatchStatus) {
-  uint16_t roughtime_port = LaunchLocalRoughtimeServer(time_server::kWrongPrivateKey);
-  LaunchService(roughtime_port, inspect::Node());
-  bool error_handler_called = false;
-  auto proxy = ConnectToService();
-  proxy.set_error_handler([&](zx_status_t status) {
-    EXPECT_EQ(status, ZX_ERR_BAD_STATE);
-    error_handler_called = true;
-  });
-  // first call always completes immediately
-  proxy->WatchStatus(
-      [&](time_external::Status status) { EXPECT_EQ(status, time_external::Status::OK); });
-  RunLoopUntilIdle();
-
-  proxy->WatchStatus([&](time_external::Status sample) { FAIL(); });
-  proxy->WatchStatus([&](time_external::Status sample) { FAIL(); });
-  RunLoopUntilIdle();
-  EXPECT_TRUE(error_handler_called);
-}
-
-TEST_F(PushSourceTest, ChannelClosedOnConcurrentWatchSample) {
-  uint16_t roughtime_port = LaunchLocalRoughtimeServer(time_server::kWrongPrivateKey);
-  LaunchService(roughtime_port, inspect::Node());
-  bool error_handler_called = false;
-  auto proxy = ConnectToService();
-  proxy.set_error_handler([&](zx_status_t status) {
-    EXPECT_EQ(status, ZX_ERR_BAD_STATE);
-    error_handler_called = true;
-  });
-
-  // First call should not complete as server is returning bad responses.
-  proxy->WatchSample([&](time_external::TimeSample sample) { FAIL(); });
-  proxy->WatchSample([&](time_external::TimeSample sample) { FAIL(); });
-  RunLoopUntilIdle();
-  EXPECT_TRUE(error_handler_called);
-}
-
-class RetryConfigTest : public gtest::TestLoopFixture {};
-
-TEST_F(RetryConfigTest, DefaultConfigTest) {
-  RetryConfig config;
-
-  std::vector<uint32_t> expected_durations_sec{1, 1, 1, 2, 2, 2, 4, 4, 4, 8, 8, 8, 8};
-  for (uint32_t failure_num = 0; failure_num < expected_durations_sec.size(); failure_num++) {
-    EXPECT_EQ(zx::sec(expected_durations_sec[failure_num]), config.WaitAfterFailure(failure_num));
-  }
-}
-
-}  // namespace network_time_service
diff --git a/src/sys/time/network_time_service/watcher.h b/src/sys/time/network_time_service/watcher.h
deleted file mode 100644
index d496483..0000000
--- a/src/sys/time/network_time_service/watcher.h
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SRC_SYS_TIME_NETWORK_TIME_SERVICE_WATCHER_H_
-#define SRC_SYS_TIME_NETWORK_TIME_SERVICE_WATCHER_H_
-
-#include <lib/fidl/cpp/clone.h>
-#include <lib/fidl/cpp/comparison.h>
-#include <lib/fit/function.h>
-
-namespace network_time_service {
-
-// A hanging get handler that allows parking callbacks, then invoking them
-// later when a new value is available. This class is not thread safe.
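-//
-// A client parks a callback with Watch(); a subsequent Update() invokes the parked callback
-// if the new value differs from the last value sent to that client.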
-template <class T>
-class Watcher {
- public:
-  using Callback = fit::function<void(T)>;
-
-  Watcher() {}
-
-  Watcher(T initial_value) : current_(std::move(initial_value)) {}
-
-  // Register a callback that is executed when a new value is produced, returning true if the
-  // callback was accepted. Returns false without registering the callback if another callback
-  // is already registered.
-  bool Watch(Callback callback) {
-    if (!callback_) {
-      callback_ = std::move(callback);
-      CallbackIfNeeded();
-      return true;
-    }
-    return false;
-  }
-
-  // Push a new value. Any registered callback is invoked if the value has changed.
-  void Update(T new_value) {
-    current_ = std::move(new_value);
-    CallbackIfNeeded();
-  }
-
-  // Clears any registered callback and last sent state.
-  void ResetClient() {
-    last_sent_.reset();
-    callback_.reset();
-  }
-
- private:
-  T CloneValue(const T& sample) {
-    T clone;
-    fidl::Clone(sample, &clone);
-    return clone;
-  }
-
-  void CallbackIfNeeded() {
-    if (!callback_) {
-      return;
-    }
-    if (current_ && (!last_sent_ || !fidl::Equals(*current_, *last_sent_))) {
-      callback_.value()(CloneValue(current_.value()));
-      callback_.reset();
-      last_sent_ = CloneValue(current_.value());
-    }
-  }
-
-  std::optional<Callback> callback_;
-  std::optional<T> last_sent_;
-  std::optional<T> current_;
-};
-
-}  // namespace network_time_service
-
-#endif  // SRC_SYS_TIME_NETWORK_TIME_SERVICE_WATCHER_H_
diff --git a/src/sys/time/network_time_service/watcher_test.cc b/src/sys/time/network_time_service/watcher_test.cc
deleted file mode 100644
index b137176..0000000
--- a/src/sys/time/network_time_service/watcher_test.cc
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/sys/time/network_time_service/watcher.h"
-
-#include <fuchsia/time/external/cpp/fidl.h>
-#include <lib/gtest/test_loop_fixture.h>
-
-using namespace fuchsia::time::external;
-namespace network_time_service {
-
-// Tests Watcher using TimeSample as the contained value.
-class SampleWatcherTest : public gtest::TestLoopFixture {};
-
-using SampleWatcher = Watcher<TimeSample>;
-
-TEST_F(SampleWatcherTest, FirstWatch) {
-  SampleWatcher watcher;
-  bool initial_called = false;
-  EXPECT_TRUE(watcher.Watch([&](TimeSample sample) {
-    EXPECT_EQ(sample.monotonic(), 20);
-    EXPECT_EQ(sample.utc(), 40);
-    initial_called = true;
-  }));
-  EXPECT_FALSE(initial_called);
-  TimeSample initial_sample;
-  initial_sample.set_monotonic(20);
-  initial_sample.set_utc(40);
-  watcher.Update(std::move(initial_sample));
-  EXPECT_TRUE(initial_called);
-}
-
-TEST_F(SampleWatcherTest, FirstWatchWithInitial) {
-  TimeSample initial_sample;
-  initial_sample.set_monotonic(20);
-  initial_sample.set_utc(40);
-  SampleWatcher watcher(std::move(initial_sample));
-
-  bool initial_called = false;
-  EXPECT_TRUE(watcher.Watch([&](TimeSample sample) {
-    EXPECT_EQ(sample.monotonic(), 20);
-    EXPECT_EQ(sample.utc(), 40);
-    initial_called = true;
-  }));
-  EXPECT_TRUE(initial_called);
-}
-
-TEST_F(SampleWatcherTest, WatchAfterUpdate) {
-  SampleWatcher watcher;
-  TimeSample sample;
-  sample.set_monotonic(20);
-  sample.set_utc(40);
-  watcher.Update(std::move(sample));
-
-  bool callback_called = false;
-  EXPECT_TRUE(watcher.Watch([&](TimeSample sample) {
-    EXPECT_EQ(sample.monotonic(), 20);
-    EXPECT_EQ(sample.utc(), 40);
-    callback_called = true;
-  }));
-  EXPECT_TRUE(callback_called);
-}
-
-TEST_F(SampleWatcherTest, RegisterMultipleCallbacks) {
-  SampleWatcher watcher;
-  bool first_watch_called = false;
-  EXPECT_TRUE(watcher.Watch([&](TimeSample sample) { first_watch_called = true; }));
-  EXPECT_FALSE(watcher.Watch([&](TimeSample sample) { FAIL(); }));
-
-  watcher.Update(TimeSample());
-  EXPECT_TRUE(first_watch_called);
-}
-
-TEST_F(SampleWatcherTest, WatchMultiple) {
-  SampleWatcher watcher;
-  bool initial_called = false;
-  EXPECT_TRUE(watcher.Watch([&](TimeSample sample) {
-    EXPECT_EQ(sample.monotonic(), 20);
-    EXPECT_EQ(sample.utc(), 40);
-    initial_called = true;
-  }));
-  EXPECT_FALSE(initial_called);
-  TimeSample initial_sample;
-  initial_sample.set_monotonic(20);
-  initial_sample.set_utc(40);
-  watcher.Update(std::move(initial_sample));
-  EXPECT_TRUE(initial_called);
-
-  // second call returns only after update pushed
-  bool second_called = false;
-  EXPECT_TRUE(watcher.Watch([&](TimeSample sample) {
-    EXPECT_EQ(sample.monotonic(), 30);
-    EXPECT_EQ(sample.utc(), 60);
-    second_called = true;
-  }));
-  EXPECT_FALSE(second_called);
-  TimeSample second_sample;
-  second_sample.set_monotonic(30);
-  second_sample.set_utc(60);
-  watcher.Update(std::move(second_sample));
-  EXPECT_TRUE(second_called);
-}
-
-TEST_F(SampleWatcherTest, MultipleUpdates) {
-  SampleWatcher watcher;
-  TimeSample initial_sample;
-  initial_sample.set_monotonic(20);
-  initial_sample.set_utc(40);
-  watcher.Update(std::move(initial_sample));
-  bool initial_called = false;
-  EXPECT_TRUE(watcher.Watch([&](TimeSample sample) {
-    EXPECT_EQ(sample.monotonic(), 20);
-    EXPECT_EQ(sample.utc(), 40);
-    initial_called = true;
-  }));
-  EXPECT_TRUE(initial_called);
-
-  // second sample triggers callback
-  TimeSample second_sample;
-  second_sample.set_monotonic(30);
-  second_sample.set_utc(60);
-  watcher.Update(std::move(second_sample));
-  bool second_called = false;
-  EXPECT_TRUE(watcher.Watch([&](TimeSample sample) {
-    EXPECT_EQ(sample.monotonic(), 30);
-    EXPECT_EQ(sample.utc(), 60);
-    second_called = true;
-  }));
-  EXPECT_TRUE(second_called);
-
-  // identical sample does not trigger callback
-  TimeSample third_sample;
-  third_sample.set_monotonic(30);
-  third_sample.set_utc(60);
-  watcher.Update(std::move(third_sample));
-  watcher.Watch([&](TimeSample sample) { FAIL(); });
-}
-
-TEST_F(SampleWatcherTest, ResetClient) {
-  SampleWatcher watcher;
-  TimeSample sample;
-  sample.set_monotonic(70);
-  sample.set_utc(140);
-  watcher.Update(std::move(sample));
-  watcher.Watch([&](TimeSample sample) {});
-  // second watch should not return as there's no update
-  watcher.Watch([&](TimeSample sample) { FAIL(); });
-  watcher.ResetClient();
-  bool third_called = false;
-  EXPECT_TRUE(watcher.Watch([&](TimeSample sample) {
-    EXPECT_EQ(sample.monotonic(), 70);
-    EXPECT_EQ(sample.utc(), 140);
-    third_called = true;
-  }));
-  EXPECT_TRUE(third_called);
-}
-
-}  // namespace network_time_service
diff --git a/src/sys/time/time_test_client/BUILD.gn b/src/sys/time/time_test_client/BUILD.gn
index a5cbaba..193dd44 100644
--- a/src/sys/time/time_test_client/BUILD.gn
+++ b/src/sys/time/time_test_client/BUILD.gn
@@ -10,7 +10,6 @@
   edition = "2018"
 
   deps = [
-    "//sdk/fidl/fuchsia.time:fuchsia.time-rustc",
     "//src/lib/fidl/rust/fidl",
     "//src/lib/fuchsia-async",
     "//src/lib/fuchsia-component",
diff --git a/src/sys/time/time_test_client/src/main.rs b/src/sys/time/time_test_client/src/main.rs
index 088a81e..7461f0d 100644
--- a/src/sys/time/time_test_client/src/main.rs
+++ b/src/sys/time/time_test_client/src/main.rs
@@ -10,7 +10,7 @@
 use {
     anyhow::{Context as _, Error},
     chrono::prelude::*,
-    fidl_fuchsia_time, fuchsia_async as fasync, fuchsia_runtime as runtime, fuchsia_zircon as zx,
+    fuchsia_async as fasync, fuchsia_runtime as runtime, fuchsia_zircon as zx,
     futures::prelude::*,
     lazy_static::lazy_static,
     log::{info, warn},
@@ -36,10 +36,6 @@
         Ok(clock_monitor) => futures.push(clock_monitor.execute().boxed()),
         Err(err) => warn!("{}", err),
     }
-    match FidlMonitor::new() {
-        Ok(fidl_monitor) => futures.push(fidl_monitor.execute().boxed()),
-        Err(err) => warn!("{}", err),
-    }
     future::join_all(futures).await;
 }
 
@@ -203,40 +199,3 @@
         )
     }
 }
-
-/// A monitor for the `fuchsia.time.Utc` FIDL interface to log changes in time source.
-struct FidlMonitor {
-    /// A proxy for a connection to a `fuchsia.time.Utc` server.
-    utc_service: fidl_fuchsia_time::UtcProxy,
-}
-
-impl FidlMonitor {
-    /// Creates a new `FidlMonitor` or returns an error if the FIDL UTC service could not be found.
-    pub fn new() -> Result<Self, Error> {
-        let utc_service =
-            fuchsia_component::client::connect_to_service::<fidl_fuchsia_time::UtcMarker>()?;
-        Ok(FidlMonitor { utc_service })
-    }
-
-    /// Async function to operate this monitor.
-    async fn execute(self) {
-        loop {
-            match self.utc_service.watch_state().await {
-                Ok(fidl_fuchsia_time::UtcState { source: Some(source), .. }) => {
-                    info!("fuchsia.time.Utc source: {:?}", source);
-                }
-                Ok(fidl_fuchsia_time::UtcState { source: None, .. }) => {
-                    // This failure mode exists because UtcState is a table, but it's not likely
-                    // to occur.
-                    warn!("fuchsia.time.Utc state did not contain a source");
-                }
-                Err(err) => {
-                    warn!("Failed to get fuchsia.time.UTC state: {:?}", err);
-                    // Assume service failures are unlikely to self-resolve so quit the monitor to
-                    // avoid spamming the log.
-                    return;
-                }
-            }
-        }
-    }
-}
diff --git a/src/sys/time/timekeeper/BUILD.gn b/src/sys/time/timekeeper/BUILD.gn
index 03c75250..28b3352 100644
--- a/src/sys/time/timekeeper/BUILD.gn
+++ b/src/sys/time/timekeeper/BUILD.gn
@@ -53,7 +53,6 @@
     "src/enums.rs",
     "src/estimator.rs",
     "src/main.rs",
-    "src/notifier.rs",
     "src/rtc.rs",
     "src/time_source.rs",
     "src/time_source_manager.rs",
diff --git a/src/sys/time/timekeeper/README.md b/src/sys/time/timekeeper/README.md
index ec00edf..6912d62 100644
--- a/src/sys/time/timekeeper/README.md
+++ b/src/sys/time/timekeeper/README.md
@@ -1,12 +1,9 @@
 # timekeeper
 
-timekeeper implements the [`fuchsia.time.Utc`][utc-fidl] protocol.
-
 Because timekeeper is implemented in Rust, we recommend that you consult the [Fuchsia docs on
 developing with Rust][fuchsia-rust-docs].
 
 [fuchsia-rust-docs]: ../../../docs/development/languages/rust/README.md
-[utc-fidl]: ./fidl/utc.fidl
 
 ## Getting Started
 
diff --git a/src/sys/time/timekeeper/service.config b/src/sys/time/timekeeper/service.config
index c630096..ba1b340 100644
--- a/src/sys/time/timekeeper/service.config
+++ b/src/sys/time/timekeeper/service.config
@@ -1,8 +1,5 @@
 {
-    "startup_services": [
-      "fuchsia.time.Utc"
-    ],
-    "services": {
-      "fuchsia.time.Utc": "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx"
-    }
+    "apps": [
+      "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx"
+    ]
 }
diff --git a/src/sys/time/timekeeper/src/clock_manager.rs b/src/sys/time/timekeeper/src/clock_manager.rs
index 36f76b8..c3be29f 100644
--- a/src/sys/time/timekeeper/src/clock_manager.rs
+++ b/src/sys/time/timekeeper/src/clock_manager.rs
@@ -9,14 +9,13 @@
             ClockCorrectionStrategy, ClockUpdateReason, StartClockSource, Track, WriteRtcOutcome,
         },
         estimator::Estimator,
-        notifier::Notifier,
         rtc::Rtc,
         time_source::TimeSource,
         time_source_manager::{KernelMonotonicProvider, TimeSourceManager},
         util::time_at_monotonic,
     },
     chrono::prelude::*,
-    fidl_fuchsia_time as ftime, fuchsia_async as fasync, fuchsia_zircon as zx,
+    fuchsia_async as fasync, fuchsia_zircon as zx,
     log::{error, info},
     std::{
         fmt::{self, Debug},
@@ -89,8 +88,6 @@
     estimator: Option<Estimator<D>>,
     /// An optional real time clock that will be updated when new UTC estimates are produced.
     rtc: Option<R>,
-    /// An optional notifier used to communicate changes in the clock synchronization state.
-    notifier: Option<Notifier>,
     /// A diagnostics implementation for recording events of note.
     diagnostics: Arc<D>,
     /// The track of the estimate being managed.
@@ -106,11 +103,10 @@
         clock: Arc<zx::Clock>,
         time_source_manager: TimeSourceManager<T, D, KernelMonotonicProvider>,
         rtc: Option<R>,
-        notifier: Option<Notifier>,
         diagnostics: Arc<D>,
         track: Track,
     ) {
-        ClockManager::new(clock, time_source_manager, rtc, notifier, diagnostics, track)
+        ClockManager::new(clock, time_source_manager, rtc, diagnostics, track)
             .maintain_clock()
             .await
     }
@@ -125,7 +121,6 @@
         clock: Arc<zx::Clock>,
         time_source_manager: TimeSourceManager<T, D, KernelMonotonicProvider>,
         rtc: Option<R>,
-        notifier: Option<Notifier>,
         diagnostics: Arc<D>,
         track: Track,
     ) -> Self {
@@ -134,7 +129,6 @@
             time_source_manager,
             estimator: None,
             rtc,
-            notifier,
             diagnostics,
             track,
             delayed_updates: None,
@@ -190,11 +184,6 @@
                 };
                 self.diagnostics.record(Event::WriteRtc { outcome });
             }
-
-            // And trigger the notifier if we have one.
-            if let Some(ref mut notifier) = self.notifier {
-                notifier.set_source(ftime::UtcSource::External).await;
-            }
         }
     }
 
@@ -342,7 +331,6 @@
         fuchsia_async as fasync, fuchsia_zircon as zx,
         futures::FutureExt,
         lazy_static::lazy_static,
-        std::task::Poll,
         test_util::{assert_geq, assert_leq},
     };
 
@@ -374,7 +362,6 @@
         clock: Arc<zx::Clock>,
         samples: Vec<Sample>,
         rtc: Option<FakeRtc>,
-        notifier: Option<Notifier>,
         diagnostics: Arc<FakeDiagnostics>,
     ) -> ClockManager<FakeTimeSource, FakeRtc, FakeDiagnostics> {
         let mut events: Vec<TimeSourceEvent> =
@@ -387,7 +374,7 @@
             time_source,
             Arc::clone(&diagnostics),
         );
-        ClockManager::new(clock, time_source_manager, rtc, notifier, diagnostics, *TEST_TRACK)
+        ClockManager::new(clock, time_source_manager, rtc, diagnostics, *TEST_TRACK)
     }
 
     #[test]
@@ -421,43 +408,31 @@
     }
 
     #[test]
-    fn single_update_with_rtc_and_notifier() {
+    fn single_update_with_rtc() {
         let mut executor = fasync::Executor::new().unwrap();
 
         let clock = create_clock();
         let rtc = FakeRtc::valid(BACKSTOP_TIME);
         let diagnostics = Arc::new(FakeDiagnostics::new());
 
-        // Spawn test notifier and verify the initial state
-        let (utc, utc_requests) =
-            fidl::endpoints::create_proxy_and_stream::<ftime::UtcMarker>().unwrap();
-        let notifier = Notifier::new(ftime::UtcSource::Backstop);
-        notifier.handle_request_stream(utc_requests);
-        let mut fut1 = async { utc.watch_state().await.unwrap().source.unwrap() }.boxed();
-        assert_eq!(executor.run_until_stalled(&mut fut1), Poll::Ready(ftime::UtcSource::Backstop));
-
-        // Create a clock manager
+        // Create a clock manager.
         let monotonic_ref = zx::Time::get_monotonic();
         let clock_manager = create_clock_manager(
             Arc::clone(&clock),
             vec![Sample::new(monotonic_ref + OFFSET, monotonic_ref, STD_DEV)],
             Some(rtc.clone()),
-            Some(notifier),
             Arc::clone(&diagnostics),
         );
 
-        // Maintain the clock until no more work remains
+        // Maintain the clock until no more work remains.
         let monotonic_before = zx::Time::get_monotonic();
-        let mut fut2 = clock_manager.maintain_clock().boxed();
-        let _ = executor.run_until_stalled(&mut fut2);
+        let mut fut = clock_manager.maintain_clock().boxed();
+        let _ = executor.run_until_stalled(&mut fut);
         let updated_utc = clock.read().unwrap();
         let monotonic_after = zx::Time::get_monotonic();
 
-        // Check that the clocks and reported time source have been updated. The UTC
-        // should be bounded by the offset we supplied added to the monotonic window in which the
-        // calculation took place.
-        let mut fut3 = async { utc.watch_state().await.unwrap().source.unwrap() }.boxed();
-        assert_eq!(executor.run_until_stalled(&mut fut3), Poll::Ready(ftime::UtcSource::External));
+        // Check that the clocks have been updated. The UTC should be bounded by the offset we
+        // supplied added to the monotonic window in which the calculation took place.
         assert_geq!(updated_utc, monotonic_before + OFFSET);
         assert_leq!(updated_utc, monotonic_after + OFFSET);
         assert_geq!(rtc.last_set().unwrap(), monotonic_before + OFFSET);
@@ -473,7 +448,7 @@
     }
 
     #[test]
-    fn single_update_without_rtc_and_notifier() {
+    fn single_update_without_rtc() {
         let mut executor = fasync::Executor::new().unwrap();
 
         let clock = create_clock();
@@ -483,7 +458,6 @@
             Arc::clone(&clock),
             vec![Sample::new(monotonic_ref + OFFSET, monotonic_ref, STD_DEV)],
             None,
-            None,
             Arc::clone(&diagnostics),
         );
 
@@ -525,7 +499,6 @@
                 Sample::new(monotonic_ref + OFFSET_2, monotonic_ref, STD_DEV),
             ],
             None,
-            None,
             Arc::clone(&diagnostics),
         );
 
@@ -588,7 +561,6 @@
                 Sample::new(monotonic_ref + OFFSET + delta_offset, monotonic_ref, STD_DEV),
             ],
             None,
-            None,
             Arc::clone(&diagnostics),
         );
 
diff --git a/src/sys/time/timekeeper/src/diagnostics/inspect.rs b/src/sys/time/timekeeper/src/diagnostics/inspect.rs
index 93ca63c..e36a036 100644
--- a/src/sys/time/timekeeper/src/diagnostics/inspect.rs
+++ b/src/sys/time/timekeeper/src/diagnostics/inspect.rs
@@ -475,9 +475,7 @@
         crate::{
             enums::{SampleValidationError as SVE, StartClockSource, TimeSourceError as TSE},
             time_source::FakeTimeSource,
-            Notifier,
         },
-        fidl_fuchsia_time as ftime,
         fuchsia_inspect::{assert_inspect_tree, testing::AnyProperty},
     };
 
@@ -529,11 +527,8 @@
         inspector: &Inspector,
         include_monitor: bool,
     ) -> (InspectDiagnostics, Arc<zx::Clock>) {
-        let primary = PrimaryTrack {
-            time_source: FakeTimeSource::failing(),
-            clock: create_clock(),
-            notifier: Notifier::new(ftime::UtcSource::Backstop),
-        };
+        let primary =
+            PrimaryTrack { time_source: FakeTimeSource::failing(), clock: create_clock() };
         let monitor = match include_monitor {
             true => {
                 Some(MonitorTrack { time_source: FakeTimeSource::failing(), clock: create_clock() })
diff --git a/src/sys/time/timekeeper/src/main.rs b/src/sys/time/timekeeper/src/main.rs
index d2f09a3..c2bf37e 100644
--- a/src/sys/time/timekeeper/src/main.rs
+++ b/src/sys/time/timekeeper/src/main.rs
@@ -10,7 +10,6 @@
 mod diagnostics;
 mod enums;
 mod estimator;
-mod notifier;
 mod rtc;
 mod time_source;
 mod time_source_manager;
@@ -23,7 +22,6 @@
             CobaltDiagnostics, CompositeDiagnostics, Diagnostics, Event, InspectDiagnostics,
         },
         enums::{InitialClockState, InitializeRtcOutcome, Role, StartClockSource, Track},
-        notifier::Notifier,
         rtc::{Rtc, RtcImpl},
         time_source::{PushTimeSource, TimeSource},
         time_source_manager::TimeSourceManager,
@@ -72,7 +70,6 @@
 struct PrimaryTrack<T: TimeSource> {
     time_source: T,
     clock: Arc<zx::Clock>,
-    notifier: Notifier,
 }
 
 /// The information required to maintain UTC for the monitor track.
@@ -109,10 +106,6 @@
     );
 
     info!("constructing time sources");
-    let notifier = Notifier::new(match initial_clock_state(&utc_clock) {
-        InitialClockState::NotSet => ftime::UtcSource::Backstop,
-        InitialClockState::PreviouslySet => ftime::UtcSource::Unverified,
-    });
     let time_source_urls = match options.dev_time_sources {
         true => DEV_TEST_SOURCES,
         false => DEFAULT_SOURCES,
@@ -120,7 +113,6 @@
     let primary_track = PrimaryTrack {
         time_source: PushTimeSource::new(time_source_urls.primary.to_string()),
         clock: Arc::new(utc_clock),
-        notifier: notifier.clone(),
     };
     let monitor_track = time_source_urls.monitor.map(|url| MonitorTrack {
         time_source: PushTimeSource::new(url.to_string()),
@@ -171,11 +163,6 @@
     })
     .detach();
 
-    info!("serving notifier on servicefs");
-    fs.dir("svc").add_fidl_service(move |requests: ftime::UtcRequestStream| {
-        notifier.handle_request_stream(requests);
-    });
-
     fs.take_and_serve_directory_handle()?;
     Ok(fs.collect().await)
 }
@@ -207,11 +194,10 @@
 }
 
 /// Attempts to initialize a userspace clock from the current value of the real time clock.
-/// sending progress to diagnosistics and a notifier as appropriate.
+/// sending progress to diagnostics as appropriate.
 async fn set_clock_from_rtc<R: Rtc, D: Diagnostics>(
     rtc: &R,
     clock: &zx::Clock,
-    notifier: &mut Notifier,
     diagnostics: Arc<D>,
 ) {
     info!("reading initial RTC time.");
@@ -245,7 +231,6 @@
     if let Err(status) = clock.update(zx::ClockUpdate::new().value(time)) {
         warn!("failed to start UTC clock from RTC at time {}: {}", utc_chrono, status);
     } else {
-        notifier.set_source(ftime::UtcSource::Unverified).await;
         diagnostics
             .record(Event::StartClock { track: Track::Primary, source: StartClockSource::Rtc });
         info!("started UTC clock from RTC at time: {}", utc_chrono);
@@ -274,27 +259,17 @@
     let initial_clock_state = initial_clock_state(&primary.clock);
     diagnostics.record(Event::Initialized { clock_state: initial_clock_state });
 
-    match initial_clock_state {
-        InitialClockState::NotSet => {
-            if let Some(rtc) = optional_rtc.as_ref() {
-                set_clock_from_rtc(
-                    rtc,
-                    &mut primary.clock,
-                    &mut primary.notifier,
-                    Arc::clone(&diagnostics),
-                )
-                .await;
+    if let Some(rtc) = optional_rtc.as_ref() {
+        match initial_clock_state {
+            InitialClockState::NotSet => {
+                set_clock_from_rtc(rtc, &mut primary.clock, Arc::clone(&diagnostics)).await;
             }
-        }
-        InitialClockState::PreviouslySet => {
-            if optional_rtc.is_some() {
+            InitialClockState::PreviouslySet => {
                 diagnostics.record(Event::InitializeRtc {
                     outcome: InitializeRtcOutcome::ReadNotAttempted,
                     time: None,
                 });
             }
-            info!("clock was already running at initialization, reporting source as unverified");
-            primary.notifier.set_source(ftime::UtcSource::Unverified).await;
         }
     }
 
@@ -325,7 +300,6 @@
         primary.clock,
         primary_source_manager,
         optional_rtc,
-        Some(primary.notifier),
         Arc::clone(&diagnostics),
         Track::Primary,
     );
@@ -335,7 +309,6 @@
                 clock,
                 source_manager,
                 None,
-                None,
                 diagnostics,
                 Track::Monitor,
             )
@@ -356,7 +329,6 @@
         },
         fidl_fuchsia_time_external as ftexternal, fuchsia_zircon as zx,
         lazy_static::lazy_static,
-        std::task::Poll,
     };
 
     const NANOS_PER_SECOND: i64 = 1_000_000_000;
@@ -380,24 +352,19 @@
         (Arc::new(clock), initial_update_ticks)
     }
 
-    #[fasync::run_singlethreaded(test)]
-    async fn successful_update_single_notify_client_with_monitor() {
+    #[test]
+    fn successful_update_with_monitor() {
+        let mut executor = fasync::Executor::new().unwrap();
         let (primary_clock, primary_ticks) = create_clock();
         let (monitor_clock, monitor_ticks) = create_clock();
         let rtc = FakeRtc::valid(INVALID_RTC_TIME);
         let diagnostics = Arc::new(FakeDiagnostics::new());
 
-        let (utc, utc_requests) =
-            fidl::endpoints::create_proxy_and_stream::<ftime::UtcMarker>().unwrap();
         let monotonic_ref = zx::Time::get_monotonic();
         let internet_reachable = future::ok(());
 
-        // Spawning test notifier and verifying initial state
-        let notifier = Notifier::new(ftime::UtcSource::Backstop);
-        notifier.handle_request_stream(utc_requests);
-        assert_eq!(utc.watch_state().await.unwrap().source.unwrap(), ftime::UtcSource::Backstop);
-
-        let _task = fasync::Task::spawn(maintain_utc(
+        // Maintain UTC until no more work remains
+        let mut fut = maintain_utc(
             PrimaryTrack {
                 clock: Arc::clone(&primary_clock),
                 time_source: FakeTimeSource::events(vec![
@@ -408,7 +375,6 @@
                         STD_DEV,
                     )),
                 ]),
-                notifier: notifier.clone(),
             },
             Some(MonitorTrack {
                 clock: Arc::clone(&monitor_clock),
@@ -426,15 +392,16 @@
             internet_reachable,
             Arc::clone(&diagnostics),
             false,
-        ));
+        )
+        .boxed();
+        let _ = executor.run_until_stalled(&mut fut);
 
-        // Checking that the reported time source has been updated and the clocks are set.
-        assert_eq!(utc.watch_state().await.unwrap().source.unwrap(), ftime::UtcSource::External);
+        // Check that the clocks are set.
         assert!(primary_clock.get_details().unwrap().last_value_update_ticks > primary_ticks);
         assert!(monitor_clock.get_details().unwrap().last_value_update_ticks > monitor_ticks);
         assert!(rtc.last_set().is_some());
 
-        // Checking that the correct diagnostic events were logged.
+        // Check that the correct diagnostic events were logged.
         diagnostics.assert_events(&[
             Event::Initialized { clock_state: InitialClockState::NotSet },
             Event::InitializeRtc {
@@ -468,29 +435,21 @@
     }
 
     #[test]
-    fn no_update_invalid_rtc_single_notify_client() {
+    fn no_update_invalid_rtc() {
         let mut executor = fasync::Executor::new().unwrap();
         let (clock, initial_update_ticks) = create_clock();
         let rtc = FakeRtc::valid(INVALID_RTC_TIME);
         let diagnostics = Arc::new(FakeDiagnostics::new());
 
-        let (utc, utc_requests) =
-            fidl::endpoints::create_proxy_and_stream::<ftime::UtcMarker>().unwrap();
         let time_source = FakeTimeSource::events(vec![TimeSourceEvent::StatusChange {
             status: ftexternal::Status::Network,
         }]);
 
         let internet_reachable = future::ok(());
 
-        // Spawning test notifier and verifying the initial state
-        let notifier = Notifier::new(ftime::UtcSource::Backstop);
-        notifier.handle_request_stream(utc_requests);
-        let mut fut1 = async { utc.watch_state().await.unwrap().source.unwrap() }.boxed();
-        assert_eq!(executor.run_until_stalled(&mut fut1), Poll::Ready(ftime::UtcSource::Backstop));
-
         // Maintain UTC until no more work remains
-        let mut fut2 = maintain_utc(
-            PrimaryTrack { clock: Arc::clone(&clock), time_source, notifier: notifier.clone() },
+        let mut fut = maintain_utc(
+            PrimaryTrack { clock: Arc::clone(&clock), time_source },
             None,
             Some(rtc.clone()),
             internet_reachable,
@@ -498,11 +457,7 @@
             false,
         )
         .boxed();
-        let _ = executor.run_until_stalled(&mut fut2);
-
-        // Checking that the reported time source has not been updated
-        let mut fut3 = async { utc.watch_state().await.unwrap().source.unwrap() }.boxed();
-        assert_eq!(executor.run_until_stalled(&mut fut3), Poll::Pending);
+        let _ = executor.run_until_stalled(&mut fut);
 
         // Checking that the clock has not been updated yet
         assert_eq!(initial_update_ticks, clock.get_details().unwrap().last_value_update_ticks);
@@ -521,29 +476,21 @@
     }
 
     #[test]
-    fn no_update_valid_rtc_single_notify_client() {
+    fn no_update_valid_rtc() {
         let mut executor = fasync::Executor::new().unwrap();
         let (clock, initial_update_ticks) = create_clock();
         let rtc = FakeRtc::valid(VALID_RTC_TIME);
         let diagnostics = Arc::new(FakeDiagnostics::new());
 
-        let (utc, utc_requests) =
-            fidl::endpoints::create_proxy_and_stream::<ftime::UtcMarker>().unwrap();
         let time_source = FakeTimeSource::events(vec![TimeSourceEvent::StatusChange {
             status: ftexternal::Status::Network,
         }]);
 
         let internet_reachable = future::ok(());
 
-        // Spawning test notifier and verifying the initial state
-        let notifier = Notifier::new(ftime::UtcSource::Backstop);
-        notifier.handle_request_stream(utc_requests);
-        let mut fut1 = async { utc.watch_state().await.unwrap().source.unwrap() }.boxed();
-        assert_eq!(executor.run_until_stalled(&mut fut1), Poll::Ready(ftime::UtcSource::Backstop));
-
         // Maintain UTC until no more work remains
-        let mut fut2 = maintain_utc(
-            PrimaryTrack { clock: Arc::clone(&clock), time_source, notifier: notifier.clone() },
+        let mut fut = maintain_utc(
+            PrimaryTrack { clock: Arc::clone(&clock), time_source },
             None,
             Some(rtc.clone()),
             internet_reachable,
@@ -551,14 +498,7 @@
             false,
         )
         .boxed();
-        let _ = executor.run_until_stalled(&mut fut2);
-
-        // Checking that the reported time source has been updated to reflect the use of RTC
-        let mut fut3 = async { utc.watch_state().await.unwrap().source.unwrap() }.boxed();
-        assert_eq!(
-            executor.run_until_stalled(&mut fut3),
-            Poll::Ready(ftime::UtcSource::Unverified)
-        );
+        let _ = executor.run_until_stalled(&mut fut);
 
         // Checking that the clock was updated to use the valid RTC time.
         assert!(clock.get_details().unwrap().last_value_update_ticks > initial_update_ticks);
@@ -591,23 +531,15 @@
         let rtc = FakeRtc::valid(VALID_RTC_TIME);
         let diagnostics = Arc::new(FakeDiagnostics::new());
 
-        let (utc, utc_requests) =
-            fidl::endpoints::create_proxy_and_stream::<ftime::UtcMarker>().unwrap();
         let time_source = FakeTimeSource::events(vec![TimeSourceEvent::StatusChange {
             status: ftexternal::Status::Network,
         }]);
 
         let internet_reachable = future::ok(());
 
-        // Spawning test notifier and verifying the initial state
-        let notifier = Notifier::new(ftime::UtcSource::Backstop);
-        notifier.handle_request_stream(utc_requests);
-        let mut fut1 = async { utc.watch_state().await.unwrap().source.unwrap() }.boxed();
-        assert_eq!(executor.run_until_stalled(&mut fut1), Poll::Ready(ftime::UtcSource::Backstop));
-
         // Maintain UTC until no more work remains
-        let mut fut2 = maintain_utc(
-            PrimaryTrack { clock: Arc::clone(&clock), time_source, notifier: notifier.clone() },
+        let mut fut = maintain_utc(
+            PrimaryTrack { clock: Arc::clone(&clock), time_source },
             None,
             Some(rtc.clone()),
             internet_reachable,
@@ -615,15 +547,8 @@
             false,
         )
         .boxed();
-        let _ = executor.run_until_stalled(&mut fut2);
+        let _ = executor.run_until_stalled(&mut fut);
 
-        // Checking that the reported time source has been updated to reflect the fact we're not
-        // using backstop but can't verify whatever the previous source was.
-        let mut fut3 = async { utc.watch_state().await.unwrap().source.unwrap() }.boxed();
-        assert_eq!(
-            executor.run_until_stalled(&mut fut3),
-            Poll::Ready(ftime::UtcSource::Unverified)
-        );
         // Checking that neither the clock nor the RTC were updated.
         assert_eq!(clock.get_details().unwrap().last_value_update_ticks, initial_update_ticks);
         assert_eq!(rtc.last_set(), None);
diff --git a/src/sys/time/timekeeper/src/notifier.rs b/src/sys/time/timekeeper/src/notifier.rs
deleted file mode 100644
index 50e18b2..0000000
--- a/src/sys/time/timekeeper/src/notifier.rs
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#![deny(unused)]
-
-use {
-    fidl_fuchsia_time as ftime, fuchsia_async as fasync, fuchsia_zircon as zx,
-    futures::{lock::Mutex, StreamExt as _},
-    log::{info, warn},
-    std::sync::Arc,
-};
-
-/// Notifies waiting clients when the clock has been updated, wrapped in a lock to allow
-/// sharing between tasks.
-#[derive(Clone, Debug)]
-pub struct Notifier(Arc<Mutex<NotifyInner>>);
-
-impl Notifier {
-    /// Constructs a new `Notifier` initially set to the supplied source.
-    pub fn new(source: ftime::UtcSource) -> Self {
-        Notifier(Arc::new(Mutex::new(NotifyInner { source, clients: Vec::new() })))
-    }
-
-    /// Changes the source to the supplied value.
-    pub async fn set_source(&self, source: ftime::UtcSource) {
-        let monotonic = zx::Time::get_monotonic().into_nanos();
-        self.0.lock().await.set_source(source, monotonic);
-    }
-
-    /// Spawns an async task to handle requests on this channel.
-    pub fn handle_request_stream(&self, requests: ftime::UtcRequestStream) {
-        let notifier = self.clone();
-        fasync::Task::spawn(async move {
-            let mut counted_requests = requests.enumerate();
-            let mut last_seen_state = notifier.0.lock().await.source;
-            while let Some((request_count, Ok(ftime::UtcRequest::WatchState { responder }))) =
-                counted_requests.next().await
-            {
-                let mut n = notifier.0.lock().await;
-                // we return immediately if this is the first request on this channel, or if there
-                // has been a new update since the last request.
-                if request_count == 0 || last_seen_state != n.source {
-                    n.reply(responder, zx::Time::get_monotonic().into_nanos());
-                } else {
-                    n.register(responder);
-                }
-                last_seen_state = n.source;
-            }
-        })
-        .detach();
-    }
-}
-
-/// Notifies waiting clients when the clock has been updated.
-#[derive(Debug)]
-struct NotifyInner {
-    /// The current source for our UTC approximation.
-    source: ftime::UtcSource,
-    /// All clients waiting for an update to UTC's time.
-    clients: Vec<ftime::UtcWatchStateResponder>,
-}
-
-impl NotifyInner {
-    /// Reply to a client with the current UtcState.
-    fn reply(&self, responder: ftime::UtcWatchStateResponder, update_time: i64) {
-        if let Err(why) = responder.send(ftime::UtcState {
-            timestamp: Some(update_time),
-            source: Some(self.source),
-            ..ftime::UtcState::EMPTY
-        }) {
-            warn!("failed to notify a client of an update: {:?}", why);
-        }
-    }
-
-    /// Registers a client to be later notified that a clock update has occurred.
-    fn register(&mut self, responder: ftime::UtcWatchStateResponder) {
-        info!("registering a client for notifications");
-        self.clients.push(responder);
-    }
-
-    /// Increases the revision counter by 1 and notifies any clients waiting on updates from
-    /// previous revisions, returning true iff the source changed as a result of the call.
-    fn set_source(&mut self, source: ftime::UtcSource, update_time: i64) -> bool {
-        if self.source != source {
-            self.source = source;
-            let clients = std::mem::replace(&mut self.clients, vec![]);
-            info!("UTC source changed to {:?}, notifying {} clients", source, clients.len());
-            for responder in clients {
-                self.reply(responder, update_time);
-            }
-            true
-        } else {
-            info!("received UTC source update but the actual source didn't change.");
-            false
-        }
-    }
-}
diff --git a/src/testing/emulator/BUILD.gn b/src/testing/emulator/BUILD.gn
index afeb11b..0693b4e 100644
--- a/src/testing/emulator/BUILD.gn
+++ b/src/testing/emulator/BUILD.gn
@@ -109,6 +109,7 @@
       "emulator.go",
       "emulator_test.go",
     ]
+    deps = [ "//tools/qemu" ]
   }
 
   go_test("emulator_tests") {
diff --git a/src/testing/emulator/emulator.go b/src/testing/emulator/emulator.go
index 8cc8e82..8188b95 100644
--- a/src/testing/emulator/emulator.go
+++ b/src/testing/emulator/emulator.go
@@ -18,6 +18,8 @@
 	"runtime"
 	"strings"
 	"time"
+
+	"go.fuchsia.dev/fuchsia/tools/qemu"
 )
 
 // Untar untars a tar.gz file into a directory.
@@ -122,6 +124,24 @@
 	Emulator Emulator
 }
 
+// commandBuilder is used to build the emulator command-line.
+//
+// See //tools/qemu for the most common implementations.
+type commandBuilder interface {
+	SetBinary(string)
+	SetKernel(string)
+	SetInitrd(string)
+	SetTarget(qemu.Target, bool) error
+	SetFlag(...string)
+	SetMemory(bytes int)
+	AddNetwork(qemu.Netdev)
+	AddHCI(qemu.HCI) error
+	AddKernelArg(string)
+	AddUSBDrive(qemu.Drive)
+	AddVirtioBlkPciDrive(qemu.Drive)
+	Build() ([]string, error)
+}
+
 // Unpack unpacks the QEMU distribution.
 //
 // TODO(fxbug.dev/58804): Replace all call sites to UnpackFrom.
@@ -222,129 +242,83 @@
 	return X64, fmt.Errorf("unknown target CPU: %s", name)
 }
 
-func (d *Distribution) appendCommonQemuArgs(params Params, args []string) []string {
-	// Append architecture specific QEMU options.  These options
-	// are meant to mirror those used by `fx qemu`.
-	if params.Arch == Arm64 {
-		args = append(args, "-machine", "virtualization=true",
-			"-cpu", "max", "-machine", "virt-2.12,gic-version=3")
-	} else if params.Arch == X64 {
-		args = append(args, "-machine", "q35", "-cpu", "Haswell,+smap,-check,-fsgsbase")
-		if !params.DisableKVM && hostSupportsKVM(params.Arch) {
-			args = append(args, "-enable-kvm")
-		}
-		if !params.DisableDebugExit {
-			args = append(args, "-device", "isa-debug-exit,iobase=0xf4,iosize=0x04")
-		}
-	} else {
-		panic("unsupported architecture")
-	}
-
+func (d *Distribution) appendCommonQemuArgs(params Params, b commandBuilder) {
+	nic := "none"
 	if params.Networking {
-		args = append(args, "-nic", "tap,ifname=qemu,script=no,downscript=no,model=virtio-net-pci")
-	} else {
-		args = append(args, "-nic", "none")
+		nic = "tap,ifname=qemu,script=no,downscript=no,model=virtio-net-pci"
 	}
-
-	return args
+	b.SetFlag("-nic", nic)
 }
 
-func (d *Distribution) appendCommonFemuArgs(params Params, args []string) []string {
+func (d *Distribution) appendCommonFemuArgs(params Params, b commandBuilder) {
 	// These options should mirror what's used by `fx emu`.
-
-	if params.Arch == Arm64 {
-		args = append(args, "-avd-arch arm64")
+	if params.Arch == X64 && !params.DisableDebugExit {
+		b.SetFlag("-device", "isa-debug-exit,iobase=0xf4,iosize=0x04")
 	}
 
-	args = append(args, "-feature", "VirtioInput,GLDirectMem,KVM,Vulkan")
-	args = append(args, "-gpu", "auto")
-	args = append(args, "-no-window")
-
-	// Everything after `-fuchsia` is passed directly to qemu by femu.
-	args = append(args, "-fuchsia")
-	// `fx emu` has slightly different semantics to `fx qemu` for architecture-specific stuff.
-	if params.Arch == Arm64 {
-		args = append(args, "-machine", "virt")
-	} else if params.Arch == X64 {
-		args = append(args, "-machine", "q35")
-		if !params.DisableKVM && hostSupportsKVM(params.Arch) {
-			args = append(args, "-enable-kvm")
-			args = append(args, "-cpu", "host,migratable=no,+invtsc")
-		} else {
-			args = append(args, "-cpu", "Haswell,+smap,-check,-fsgsbase")
-		}
-		if !params.DisableDebugExit {
-			args = append(args, "-device", "isa-debug-exit,iobase=0xf4,iosize=0x04")
-		}
-	} else {
-		panic("unsupported architecture")
-	}
-	args = append(args, "-vga", "none")
-
 	if params.Networking {
-		args = append(args, "-netdev", "type=tap,ifname=qemu,id=net0,script=no")
-		args = append(args, "-device", "virtio-net-pci,vectors=8,netdev=net0,mac=52:54:00:63:5e:7a")
-	} else {
-		args = append(args, "-net", "none")
+		b.AddNetwork(qemu.Netdev{
+			ID:  "net0",
+			MAC: "52:54:00:63:5e:7a",
+			Tap: &qemu.NetdevTap{Name: "qemu"},
+		})
 	}
-
-	return args
 }
 
-func (d *Distribution) appendCommonArgs(params Params, args []string) []string {
-	args = append(args, "-kernel", d.kernelPath(params.Arch))
-	if d.Emulator == Qemu {
-		args = d.appendCommonQemuArgs(params, args)
-	} else if d.Emulator == Femu {
-		args = d.appendCommonFemuArgs(params, args)
+func (d *Distribution) setCommonArgs(params Params, b commandBuilder) {
+	b.SetBinary(d.systemPath(params.Arch))
+	b.SetKernel(d.kernelPath(params.Arch))
+	b.SetInitrd(params.ZBI)
+
+	enableKVM := true
+	if params.Arch == Arm64 {
+		b.SetTarget(qemu.TargetEnum.AArch64, enableKVM)
+	} else {
+		b.SetTarget(qemu.TargetEnum.X86_64, enableKVM)
 	}
 
-	diskParams := []string{}
-	hasUsbDisk := false
+	if d.Emulator == Qemu {
+		d.appendCommonQemuArgs(params, b)
+	} else if d.Emulator == Femu {
+		d.appendCommonFemuArgs(params, b)
+	}
 
+	hasUsbDisk := false
 	for i, disk := range params.Disks {
-		drive_id := fmt.Sprintf("disk%02d", i)
-		diskParams = append(diskParams, "-drive", "if=none,id="+drive_id+",file="+disk.Path+",format=raw")
+		drive := qemu.Drive{ID: fmt.Sprintf("disk%02d", i), File: disk.Path}
 		if disk.USB {
 			hasUsbDisk = true
-			diskParams = append(diskParams, "-device", "usb-storage,drive="+drive_id)
+			b.AddUSBDrive(drive)
 		} else {
-			diskParams = append(diskParams, "-device", "virtio-blk-pci,drive="+drive_id)
+			b.AddVirtioBlkPciDrive(drive)
 		}
 	}
 
 	if hasUsbDisk {
 		// If we have USB disks, we also need to emulate a USB host controller.
-		args = append(args, "-device", "qemu-xhci,id=xhci")
+		b.AddHCI(qemu.XHCI)
 	}
 
-	args = append(args, diskParams...)
-
 	// Ask QEMU to emit a message on stderr once the VM is running
 	// so we'll know whether QEMU has started or not.
-	args = append(args, "-trace", "enable=vm_state_notify")
-	args = append(args, "-smp", "4,threads=2")
+	b.SetFlag("-trace", "enable=vm_state_notify")
+	b.SetFlag("-nographic")
+	b.SetFlag("-smp", "4,threads=2")
+	b.SetMemory(8192)
 
-	args = append(args, "-m", "8192")
-	args = append(args, "-nographic")
-
-	return args
-}
-
-func getCommonKernelCmdline(params Params) string {
-	cmdline := "kernel.serial=legacy " +
-		"kernel.entropy-mixin=1420bb81dc0396b37cc2d0aa31bb2785dadaf9473d0780ecee1751afb5867564 " +
-		"kernel.halt-on-panic=true " +
-		// Disable lockup detector heartbeats. In emulated environments, virtualized CPUs
-		// may be starved or fail to execute in a timely fashion, resulting in apparent
-		// lockups. See fxbug.dev/65990.
-		"kernel.lockup-detector.heartbeat-period-ms=0 " +
-		"kernel.lockup-detector.heartbeat-age-threshold-ms=0"
+	b.AddKernelArg("kernel.serial=legacy")
+	b.AddKernelArg("kernel.entropy-mixin=1420bb81dc0396b37cc2d0aa31bb2785dadaf9473d0780ecee1751afb5867564")
+	b.AddKernelArg("kernel.halt-on-panic=true")
+	// Disable lockup detector heartbeats. In emulated environments, virtualized CPUs
+	// may be starved or fail to execute in a timely fashion, resulting in apparent
+	// lockups. See fxbug.dev/65990.
+	b.AddKernelArg("kernel.lockup-detector.heartbeat-period-ms=0")
+	b.AddKernelArg("kernel.lockup-detector.heartbeat-age-threshold-ms=0")
 	if params.AppendCmdline != "" {
-		cmdline += " "
-		cmdline += params.AppendCmdline
+		for _, arg := range strings.Split(params.AppendCmdline, " ") {
+			b.AddKernelArg(arg)
+		}
 	}
-	return cmdline
 }
 
 // Create creates an instance of the emulator with the given parameters.
@@ -352,15 +326,24 @@
 	if params.ZBI == "" {
 		panic("ZBI must be specified")
 	}
-	args := []string{}
-	args = d.appendCommonArgs(params, args)
-	args = append(args, "-initrd", params.ZBI)
-	args = append(args, "-append", getCommonKernelCmdline(params))
-	path := d.systemPath(params.Arch)
-	fmt.Printf("Running %s %s\n", path, args)
+
+	var b commandBuilder
+	if d.Emulator == Femu {
+		b = qemu.NewAEMUCommandBuilder()
+	} else {
+		b = &qemu.QEMUCommandBuilder{}
+	}
+
+	d.setCommonArgs(params, b)
+	args, err := b.Build()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("Running %s %s\n", args[0], args[1:])
 
 	i := &Instance{
-		cmd:      exec.Command(path, args...),
+		cmd:      exec.Command(args[0], args[1:]...),
 		emulator: d.Emulator,
 	}
 	// QEMU looks in the cwd for some specially named files, in particular
@@ -405,7 +388,7 @@
 func (d *Distribution) runNonInteractive(root, toRun, hostPathMinfsBinary, hostPathZbiBinary string, params Params) (string, string, error) {
 	// Write runcmds that mounts the results disk, runs the requested command, and
 	// shuts down.
-	b := `mkdir /tmp/testdata-fs
+	script := `mkdir /tmp/testdata-fs
 waitfor class=block topo=/dev/sys/pci/00:06.0/virtio-block/block timeout=60000
 mount /dev/sys/pci/00:06.0/virtio-block/block /tmp/testdata-fs
 ` + toRun + ` 2>/tmp/testdata-fs/err.txt >/tmp/testdata-fs/log.txt
@@ -413,7 +396,7 @@
 dm poweroff
 `
 	runcmds := filepath.Join(root, "runcmds.txt")
-	if err := ioutil.WriteFile(runcmds, []byte(b), 0666); err != nil {
+	if err := ioutil.WriteFile(runcmds, []byte(script), 0666); err != nil {
 		return "", "", err
 	}
 	// Make a minfs filesystem to mount in the target.
@@ -424,28 +407,34 @@
 	}
 
 	// Create the new initrd that references the runcmds file.
-	zbi := filepath.Join(root, "a.zbi")
-	cmd = exec.Command(hostPathZbiBinary, "-o", zbi, params.ZBI, "-e", "runcmds="+runcmds)
+	oldZBI := params.ZBI
+	params.ZBI = filepath.Join(root, "a.zbi")
+	cmd = exec.Command(hostPathZbiBinary, "-o", params.ZBI, oldZBI, "-e", "runcmds="+runcmds)
 	if err := cmd.Run(); err != nil {
 		return "", "", err
 	}
 
-	// Build up the emulator command line from common arguments and the extra goop to
-	// add the temporary disk at 00:06.0. This follows how infra runs qemu with an
-	// extra disk via botanist.
-	path := d.systemPath(params.Arch)
-	args := []string{}
-	args = d.appendCommonArgs(params, args)
-	args = append(args, "-initrd", zbi)
-	args = append(args, "-object", "iothread,id=resultiothread")
-	args = append(args, "-drive", "id=resultdisk,file="+fs+",format=raw,if=none,cache=unsafe,aio=threads")
-	args = append(args, "-device", "virtio-blk-pci,drive=resultdisk,iothread=resultiothread,addr=6.0")
-	cmdline := getCommonKernelCmdline(params)
-	cmdline += " zircon.autorun.boot=/boot/bin/sh+/boot/runcmds"
-	args = append(args, "-append", cmdline)
+	var b commandBuilder
+	if d.Emulator == Femu {
+		b = qemu.NewAEMUCommandBuilder()
+	} else {
+		b = &qemu.QEMUCommandBuilder{}
+	}
 
-	fmt.Printf("Running non-interactive %s %s\n", path, args)
-	cmd = exec.Command(path, args...)
+	d.setCommonArgs(params, b)
+
+	// Add the temporary disk at 00:06.0. This follows how infra runs qemu with an extra
+	// disk via botanist.
+	b.AddVirtioBlkPciDrive(qemu.Drive{ID: "resultdisk", File: fs, Addr: "6.0"})
+	b.AddKernelArg("zircon.autorun.boot=/boot/bin/sh+/boot/runcmds")
+
+	args, err := b.Build()
+	if err != nil {
+		return "", "", err
+	}
+	fmt.Printf("Running non-interactive %s %s\n", args[0], args[1:])
+
+	cmd = exec.Command(args[0], args[1:]...)
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
 	// QEMU looks in the cwd for some specially named files, in particular
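
Not part of the change, but for orientation: a minimal, hypothetical sketch of how a caller drives one of the //tools/qemu builders used above, instead of assembling an argument slice by hand. All paths and values are placeholders.

    package main

    import (
            "fmt"
            "log"

            "go.fuchsia.dev/fuchsia/tools/qemu"
    )

    func main() {
            // Hypothetical inputs; real callers get these from the Distribution.
            b := &qemu.QEMUCommandBuilder{}
            b.SetBinary("/path/to/qemu-system-x86_64")
            b.SetKernel("/path/to/qemu-kernel.bin")
            b.SetInitrd("/path/to/fuchsia.zbi")
            if err := b.SetTarget(qemu.TargetEnum.X86_64, true /* enable KVM */); err != nil {
                    log.Fatal(err)
            }
            b.SetMemory(8192)
            b.SetFlag("-nographic")
            b.AddKernelArg("kernel.serial=legacy")

            // Build returns the full argv: args[0] is the emulator binary and
            // args[1:] are its flags, or an error if the configuration is incomplete.
            args, err := b.Build()
            if err != nil {
                    log.Fatal(err)
            }
            fmt.Println(args)
    }

As in Create() above, the resulting slice is handed directly to exec.Command(args[0], args[1:]...).
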
diff --git a/src/testing/sl4f/BUILD.gn b/src/testing/sl4f/BUILD.gn
index c019b58..6c4a717 100644
--- a/src/testing/sl4f/BUILD.gn
+++ b/src/testing/sl4f/BUILD.gn
@@ -87,6 +87,7 @@
     "//sdk/fidl/fuchsia.wlan.common:fuchsia.wlan.common-rustc",
     "//sdk/fidl/fuchsia.wlan.device:fuchsia.wlan.device-rustc",
     "//sdk/fidl/fuchsia.wlan.device.service:fuchsia.wlan.device.service-rustc",
+    "//sdk/fidl/fuchsia.wlan.internal:fuchsia.wlan.internal-rustc",
     "//sdk/fidl/fuchsia.wlan.policy:fuchsia.wlan.policy-rustc",
     "//sdk/fidl/fuchsia.wlan.product.deprecatedconfiguration:fuchsia.wlan.product.deprecatedconfiguration-rustc",
     "//sdk/fidl/fuchsia.wlan.sme:fuchsia.wlan.sme-rustc",
diff --git a/src/testing/sl4f/src/wlan/commands.rs b/src/testing/sl4f/src/wlan/commands.rs
index 180637b..6dfb955 100644
--- a/src/testing/sl4f/src/wlan/commands.rs
+++ b/src/testing/sl4f/src/wlan/commands.rs
@@ -5,14 +5,74 @@
 use crate::server::Facade;
 use anyhow::{format_err, Error};
 use async_trait::async_trait;
+use fidl_fuchsia_wlan_common as fidl_common;
+use fidl_fuchsia_wlan_internal as fidl_internal;
 use fuchsia_syslog::macros::*;
+use serde::{Deserialize, Serialize};
 use serde_json::{to_value, Value};
+use std::collections::HashMap;
 
 // Testing helper methods
 use crate::wlan::facade::WlanFacade;
 
 use crate::common_utils::common::parse_u64_identifier;
 
+// We're using serde's "remote derive" feature to allow us to derive (De)Serialize for a third-
+// party type (i.e. fidl_internal::BssDescription). See here for more info:
+// https://serde.rs/remote-derive.html
+
+#[derive(Serialize, Deserialize)]
+#[serde(remote = "fidl_common::Cbw")]
+#[repr(u32)]
+pub enum CbwDef {
+    Cbw20 = 0,
+    Cbw40 = 1,
+    Cbw40Below = 2,
+    Cbw80 = 3,
+    Cbw160 = 4,
+    Cbw80P80 = 5,
+}
+
+#[derive(Serialize, Deserialize)]
+#[serde(remote = "fidl_common::WlanChan")]
+pub struct WlanChanDef {
+    pub primary: u8,
+    #[serde(with = "CbwDef")]
+    pub cbw: fidl_common::Cbw,
+    pub secondary80: u8,
+}
+
+#[derive(Serialize, Deserialize)]
+#[serde(remote = "fidl_internal::BssTypes")]
+pub enum BssTypesDef {
+    Infrastructure = 1,
+    Personal = 2,
+    Independent = 3,
+    Mesh = 4,
+    AnyBss = 5,
+}
+
+#[derive(Serialize, Deserialize)]
+#[serde(remote = "fidl_internal::BssDescription")]
+struct BssDescriptionDef {
+    pub bssid: [u8; 6],
+    #[serde(with = "BssTypesDef")]
+    pub bss_type: fidl_internal::BssTypes,
+    pub beacon_period: u16,
+    pub timestamp: u64,
+    pub local_time: u64,
+    pub cap: u16,
+    pub ies: Vec<u8>,
+    #[serde(with = "WlanChanDef")]
+    pub chan: fidl_common::WlanChan,
+    pub rssi_dbm: i8,
+    pub snr_db: i8,
+}
+#[derive(serde::Serialize)]
+struct BssDescriptionWrapper<'a>(
+    #[serde(with = "BssDescriptionDef")] &'a fidl_internal::BssDescription,
+);
+
 #[async_trait(?Send)]
 impl Facade for WlanFacade {
     async fn handle_request(&self, method: String, args: Value) -> Result<Value, Error> {
@@ -24,6 +84,26 @@
                 // return the scan results
                 to_value(results).map_err(|e| format_err!("error handling scan results: {}", e))
             }
+            "scan_for_bss_info" => {
+                fx_log_info!(tag: "WlanFacade", "performing wlan scan");
+                let results = self.scan_for_bss_info().await?;
+                fx_log_info!(tag: "WlanFacade", "received {:?} scan results", results.len());
+                // convert each BssDescription, which can't be serialized directly, into a BssDescriptionWrapper
+                let results: HashMap<String, Vec<BssDescriptionWrapper<'_>>> = results
+                    .iter()
+                    .map(|(ssid, bss_desc)| {
+                        (
+                            String::from_utf8(ssid.clone()).unwrap(),
+                            bss_desc
+                                .iter()
+                                .map(|bss_desc| BssDescriptionWrapper(&**bss_desc))
+                                .collect(),
+                        )
+                    })
+                    .collect();
+                // return the scan results
+                to_value(results).map_err(|e| format_err!("error handling scan results: {}", e))
+            }
             "connect" => {
                 let target_ssid = match args.get("target_ssid") {
                     Some(ssid) => {
@@ -49,8 +129,16 @@
                     _ => vec![0; 0],
                 };
 
+                let target_bss_desc = match args.get("target_bss_desc") {
+                    Some(bss_desc_json) => {
+                        let bss_desc = BssDescriptionDef::deserialize(bss_desc_json)?;
+                        Some(Box::new(bss_desc))
+                    }
+                    None => None,
+                };
+
                 fx_log_info!(tag: "WlanFacade", "performing wlan connect to SSID: {:?}", target_ssid);
-                let results = self.connect(target_ssid, target_pwd).await?;
+                let results = self.connect(target_ssid, target_pwd, target_bss_desc).await?;
                 to_value(results)
                     .map_err(|e| format_err!("error handling connection result: {}", e))
             }
diff --git a/src/testing/sl4f/src/wlan/facade.rs b/src/testing/sl4f/src/wlan/facade.rs
index 55b7dbd..931636e 100644
--- a/src/testing/sl4f/src/wlan/facade.rs
+++ b/src/testing/sl4f/src/wlan/facade.rs
@@ -6,9 +6,11 @@
 use anyhow::{Context as _, Error};
 use fidl_fuchsia_wlan_device;
 use fidl_fuchsia_wlan_device_service::{DeviceServiceMarker, DeviceServiceProxy};
+use fidl_fuchsia_wlan_internal as fidl_internal;
 use fuchsia_component::client::connect_to_service;
 use fuchsia_zircon as zx;
 use parking_lot::RwLock;
+use std::collections::HashMap;
 
 // WlanFacade: proxies commands from sl4f test to proper fidl APIs
 //
@@ -71,13 +73,43 @@
         Ok(ssids)
     }
 
-    pub async fn connect(&self, target_ssid: Vec<u8>, target_pwd: Vec<u8>) -> Result<bool, Error> {
+    pub async fn scan_for_bss_info(
+        &self,
+    ) -> Result<HashMap<Vec<u8>, Vec<Box<fidl_internal::BssDescription>>>, Error> {
+        // get the first client interface
+        let sme_proxy = wlan_service_util::client::get_first_sme(&self.wlan_svc)
+            .await
+            .context("Scan: failed to get client iface sme proxy")?;
+
+        // start the scan
+        let mut results =
+            wlan_service_util::client::passive_scan(&sme_proxy).await.context("Scan failed")?;
+
+        // send the bss descriptions back to the test
+        let mut hashmap = HashMap::new();
+        for bss in results.drain(..) {
+            if let Some(bss_desc) = bss.bss_desc {
+                let entry = hashmap.entry(bss.ssid).or_insert(vec![]);
+                entry.push(bss_desc);
+            }
+        }
+
+        Ok(hashmap)
+    }
+
+    pub async fn connect(
+        &self,
+        target_ssid: Vec<u8>,
+        target_pwd: Vec<u8>,
+        target_bss_desc: Option<Box<fidl_internal::BssDescription>>,
+    ) -> Result<bool, Error> {
         // get the first client interface
         let sme_proxy = wlan_service_util::client::get_first_sme(&self.wlan_svc)
             .await
             .context("Connect: failed to get client iface sme proxy")?;
 
-        wlan_service_util::client::connect(&sme_proxy, target_ssid, target_pwd).await
+        wlan_service_util::client::connect(&sme_proxy, target_ssid, target_pwd, target_bss_desc)
+            .await
     }
 
     /// Destroys a WLAN interface by input interface ID.
diff --git a/src/tests/femu/femu_test.go b/src/tests/femu/femu_test.go
index 1c6a313..b160918 100644
--- a/src/tests/femu/femu_test.go
+++ b/src/tests/femu/femu_test.go
@@ -91,5 +91,5 @@
 
 	// Check that the emulated disk is there.
 	i.RunCommand("lsblk")
-	i.WaitForLogMessage("/dev/sys/pci/00:01.0/virtio-block/block")
+	i.WaitForLogMessage("/dev/sys/pci/00:03.0/virtio-block/block")
 }
diff --git a/src/tests/fidl/conformance_suite/recursive_depth.gidl b/src/tests/fidl/conformance_suite/recursive_depth.gidl
index 103c60f..b93700e 100644
--- a/src/tests/fidl/conformance_suite/recursive_depth.gidl
+++ b/src/tests/fidl/conformance_suite/recursive_depth.gidl
@@ -5,7 +5,7 @@
 
 success("RecursiveOptionalStructUnderLimit") {
     // TODO(fxbug.dev/53616) Enable in all bindings.
-    bindings_allowlist = [llcpp, rust],
+    bindings_allowlist = [llcpp, rust, go],
     value = RecursiveOptionalStruct { // 0
     inner: RecursiveOptionalStruct { // 1
     inner: RecursiveOptionalStruct { // 2
@@ -171,7 +171,7 @@
 
 success("RecursiveOptionalStructArrayWrapperUnderLimit") {
     // TODO(fxbug.dev/53616) Enable in all bindings.
-    bindings_allowlist = [llcpp, rust],
+    bindings_allowlist = [llcpp, rust, go],
     value = RecursiveOptionalStructArrayWrapper { // 0
     arr: [ // 0
     RecursiveOptionalStruct { // 0
@@ -345,7 +345,7 @@
 
 success("RecursiveOptionalAndUnionUnderLimit") {
     // TODO(fxbug.dev/53616) Enable in all bindings.
-    bindings_allowlist = [llcpp, rust],
+    bindings_allowlist = [llcpp, rust, go],
     value = RecursiveOptionalAndUnionStruct { // 0
     u: RecursiveOptionalAndUnion { // 0
     recursive_optional: RecursiveOptionalStruct { // 1
@@ -521,7 +521,7 @@
 
 success("RecursiveOptionalAndTableUnderLimit") {
     // TODO(fxbug.dev/53616) Enable in all bindings.
-    bindings_allowlist = [llcpp, rust],
+    bindings_allowlist = [llcpp, rust, go],
     value = RecursiveOptionalAndTableStruct { // 0
     t: RecursiveOptionalAndTable { // 0
     recursive_optional: RecursiveOptionalStruct { // 2
@@ -695,7 +695,7 @@
 
 success("RecursiveOptionalAndVectorUnderLimit") {
     // TODO(fxbug.dev/53616) Enable in all bindings.
-    bindings_allowlist = [llcpp, rust],
+    bindings_allowlist = [llcpp, rust, go],
     value = RecursiveOptionalAndVectorStruct { // 0
     vec: [ RecursiveOptionalStruct { // 1
     inner: RecursiveOptionalStruct { // 2
@@ -867,7 +867,7 @@
 
 success("RecursiveOptionalStructWithStringUnderLimit") {
     // TODO(fxbug.dev/53616) Enable in all bindings.
-    bindings_allowlist = [llcpp, rust],
+    bindings_allowlist = [llcpp, rust, go],
     value = RecursiveOptionalStructWithString { // 0
     inner: RecursiveOptionalStructWithString { // 1
     inner: RecursiveOptionalStructWithString { // 2
diff --git a/src/tests/microbenchmarks/lazy_dir.cc b/src/tests/microbenchmarks/lazy_dir.cc
index 8926af1..ebf60f8 100644
--- a/src/tests/microbenchmarks/lazy_dir.cc
+++ b/src/tests/microbenchmarks/lazy_dir.cc
@@ -80,8 +80,8 @@
     dir->AddEntry({id++, name, V_TYPE_FILE});
   }
 
+  fs::VdirCookie cookie;
   while (state->KeepRunning()) {
-    fs::vdircookie_t cookie;
     size_t real_len = 0;
     while (dir->Readdir(&cookie, buffer.data(), buffer_size, &real_len) != ZX_OK) {
       ZX_ASSERT(real_len != 0);
diff --git a/src/tests/microbenchmarks/pseudo_dir.cc b/src/tests/microbenchmarks/pseudo_dir.cc
index 53fe564..22e84ce 100644
--- a/src/tests/microbenchmarks/pseudo_dir.cc
+++ b/src/tests/microbenchmarks/pseudo_dir.cc
@@ -83,8 +83,8 @@
     dir->AddEntry(name, file);
   }
 
+  fs::VdirCookie cookie;
   while (state->KeepRunning()) {
-    fs::vdircookie_t cookie;
     size_t real_len = 0;
     do {
       auto status = dir->Readdir(&cookie, buffer.data(), buffer.size(), &real_len);
diff --git a/tools/botanist/BUILD.gn b/tools/botanist/BUILD.gn
index 5b15b0c..6e65894 100644
--- a/tools/botanist/BUILD.gn
+++ b/tools/botanist/BUILD.gn
@@ -37,6 +37,8 @@
     "device.go",
     "device_test.go",
     "errors.go",
+    "gce.go",
+    "gce_test.go",
     "qemu.go",
     "target.go",
   ]
diff --git a/tools/botanist/cmd/run.go b/tools/botanist/cmd/run.go
index 987c4c6..c50cadb 100644
--- a/tools/botanist/cmd/run.go
+++ b/tools/botanist/cmd/run.go
@@ -486,6 +486,12 @@
 		}
 		t, err := target.NewDeviceTarget(ctx, cfg, opts)
 		return t, err
+	case "gce":
+		var cfg target.GCEConfig
+		if err := json.Unmarshal(obj, &cfg); err != nil {
+			return nil, fmt.Errorf("invalid GCE config found: %v", err)
+		}
+		return target.NewGCETarget(ctx, cfg, opts)
 	default:
 		return nil, fmt.Errorf("unknown type found: %q", x.Type)
 	}
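
For context, the new "gce" case deserializes the target entry into the GCEConfig struct added below in tools/botanist/target/gce.go. A hypothetical sketch of that flow with invented values (the JSON keys are the struct tags declared on GCEConfig; target.Options is assumed to be the existing options struct shared with the other target types; everything else is a placeholder):

    package main

    import (
            "context"
            "encoding/json"
            "log"

            "go.fuchsia.dev/fuchsia/tools/botanist/target"
    )

    func main() {
            // Invented example values; only the JSON keys are meaningful.
            raw := []byte(`{
                    "mediator_url": "gcem.example.googleapis.com:443",
                    "build_id": "1234567890abcdef",
                    "cloud_project": "example-cloud-project",
                    "swarming_server": "https://example-swarming.appspot.com",
                    "machine_shape": "n1-standard-4"
            }`)
            var cfg target.GCEConfig
            if err := json.Unmarshal(raw, &cfg); err != nil {
                    log.Fatalf("invalid GCE config: %v", err)
            }

            // NewGCETarget shells out to ./gcem_client and reads SWARMING_TASK_ID,
            // so this only succeeds inside a swarming task environment.
            if _, err := target.NewGCETarget(context.Background(), cfg, target.Options{}); err != nil {
                    log.Fatal(err)
            }
    }
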
diff --git a/tools/botanist/target/gce.go b/tools/botanist/target/gce.go
new file mode 100644
index 0000000..679f5e1
--- /dev/null
+++ b/tools/botanist/target/gce.go
@@ -0,0 +1,299 @@
+// Copyright 2020 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package target
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/json"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"os/user"
+
+	"go.fuchsia.dev/fuchsia/tools/bootserver"
+	"go.fuchsia.dev/fuchsia/tools/lib/logger"
+
+	"golang.org/x/crypto/ssh"
+)
+
+const (
+	gcemClientBinary  = "./gcem_client"
+	gceSerialEndpoint = "ssh-serialport.googleapis.com:9600"
+)
+
+// gceSerial is a ReadWriteCloser that talks to a GCE serial port via SSH.
+type gceSerial struct {
+	in     io.WriteCloser
+	out    io.Reader
+	sess   *ssh.Session
+	client *ssh.Client
+}
+
+func newGCESerial(pkeyPath, username, endpoint string) (*gceSerial, error) {
+	// Load the pkey and use it to dial the GCE serial port.
+	data, err := ioutil.ReadFile(pkeyPath)
+	if err != nil {
+		return nil, err
+	}
+	signer, err := ssh.ParsePrivateKey(data)
+	if err != nil {
+		return nil, err
+	}
+	sshConfig := &ssh.ClientConfig{
+		User: username,
+		Auth: []ssh.AuthMethod{
+			ssh.PublicKeys(signer),
+		},
+		// TODO(rudymathu): Replace this with the Google SSH serial port host key.

+		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+	}
+	client, err := ssh.Dial("tcp", endpoint, sshConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create an SSH shell and wire up stdio.
+	session, err := client.NewSession()
+	if err != nil {
+		return nil, err
+	}
+	out, err := session.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	in, err := session.StdinPipe()
+	if err != nil {
+		return nil, err
+	}
+	if err := session.Shell(); err != nil {
+		return nil, err
+	}
+	return &gceSerial{
+		in:     in,
+		out:    out,
+		sess:   session,
+		client: client,
+	}, nil
+}
+
+func (s *gceSerial) Read(b []byte) (int, error) {
+	return s.out.Read(b)
+}
+
+func (s *gceSerial) Write(b []byte) (int, error) {
+	return s.in.Write(b)
+}
+
+func (s *gceSerial) Close() error {
+	multierr := ""
+	if err := s.in.Close(); err != nil {
+		multierr += fmt.Sprintf("failed to close serial SSH session input pipe: %s, ", err)
+	}
+	if err := s.sess.Close(); err != nil {
+		multierr += fmt.Sprintf("failed to close serial SSH session: %s, ", err)
+	}
+	if err := s.client.Close(); err != nil {
+		multierr += fmt.Sprintf("failed to close serial SSH client: %s", err)
+	}
+	if multierr != "" {
+		return errors.New(multierr)
+	}
+	return nil
+}
+
+// GCEConfig represents the on-disk config used by botanist to launch a GCE
+// instance.
+type GCEConfig struct {
+	// MediatorURL is the URL of the GCE Mediator.
+	MediatorURL string `json:"mediator_url"`
+	// BuildID is the swarming task ID of the associated build.
+	BuildID string `json:"build_id"`
+	// CloudProject is the cloud project to create the GCE Instance in.
+	CloudProject string `json:"cloud_project"`
+	// SwarmingServer is the URL to the swarming server that fed us this
+	// task.
+	SwarmingServer string `json:"swarming_server"`
+	// MachineShape is the shape of the instance we want to create.
+	MachineShape string `json:"machine_shape"`
+}
+
+// GCETarget represents a GCE VM running Fuchsia.
+type GCETarget struct {
+	config       GCEConfig
+	opts         Options
+	pubkeyPath   string
+	instanceName string
+	zone         string
+	serial       io.ReadWriteCloser
+}
+
+// createInstanceRes is returned by the gcem_client's create-instance
+// subcommand. Its schema is determined by the CreateInstanceRes proto
+// message in http://google3/turquoise/infra/gce_mediator/proto/mediator.proto.
+type createInstanceRes struct {
+	InstanceName string `json:"instanceName"`
+	Zone         string `json:"zone"`
+}
+
+// NewGCETarget creates, starts, and connects to the serial console of a GCE VM.
+func NewGCETarget(ctx context.Context, config GCEConfig, opts Options) (*GCETarget, error) {
+	// Generate an SSH keypair. We do this even if the caller has provided
+	// an SSH key in opts because we require a very specific input format:
+	// PEM-encoded, PKCS#1-marshaled RSA keys.
+	pkeyPath, err := generatePrivateKey()
+	if err != nil {
+		return nil, err
+	}
+	opts.SSHKey = pkeyPath
+	pubkeyPath, err := generatePublicKey(opts.SSHKey)
+	if err != nil {
+		return nil, err
+	}
+	logger.Infof(ctx, "generated SSH key pair for use with GCE instance")
+
+	// Set up and execute the command to create the instance.
+	taskID := os.Getenv("SWARMING_TASK_ID")
+	if taskID == "" {
+		return nil, errors.New("task did not specify SWARMING_TASK_ID")
+	}
+	u, err := user.Current()
+	if err != nil {
+		return nil, err
+	}
+	invocation := []string{
+		gcemClientBinary,
+		"create-instance",
+		"-host", config.MediatorURL,
+		"-project", config.CloudProject,
+		"-build-id", config.BuildID,
+		"-task-id", taskID,
+		"-swarming-host", config.SwarmingServer,
+		"-machine-shape", config.MachineShape,
+		"-user", u.Username,
+		"-pubkey", pubkeyPath,
+	}
+	logger.Infof(ctx, "creating instance using gcem_client: %v", invocation)
+
+	cmd := exec.Command(invocation[0], invocation[1:]...)
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	cmd.Stderr = os.Stderr
+
+	if err := cmd.Start(); err != nil {
+		return nil, err
+	}
+	var res createInstanceRes
+	if err := json.NewDecoder(stdout).Decode(&res); err != nil {
+		return nil, err
+	}
+	if err := cmd.Wait(); err != nil {
+		return nil, err
+	}
+
+	// Set up the "serial" line.
+	logger.Infof(ctx, "setting up the serial connection to the GCE instance")
+	username := fmt.Sprintf(
+		"%s.%s.%s.%s",
+		config.CloudProject,
+		res.Zone,
+		res.InstanceName,
+		u.Username,
+	)
+	serial, err := newGCESerial(opts.SSHKey, username, gceSerialEndpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	return &GCETarget{
+		config:       config,
+		opts:         opts,
+		pubkeyPath:   pubkeyPath,
+		instanceName: res.InstanceName,
+		zone:         res.Zone,
+		serial:       serial,
+	}, nil
+}
+
+// generatePrivateKey generates a 2048-bit RSA private key, writes it to
+// a temporary file, and returns the path to the key.
+func generatePrivateKey() (string, error) {
+	pkey, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		return "", err
+	}
+	f, err := ioutil.TempFile("", "gce_pkey")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	pemBlock := &pem.Block{
+		Type:    "RSA PRIVATE KEY",
+		Headers: nil,
+		Bytes:   x509.MarshalPKCS1PrivateKey(pkey),
+	}
+	return f.Name(), pem.Encode(f, pemBlock)
+}
+
+// generatePublicKey reads the private key at pkeyFile and generates a public
+// key in authorized_keys format. It returns the path to the public key file.
+func generatePublicKey(pkeyFile string) (string, error) {
+	if pkeyFile == "" {
+		return "", errors.New("no private key file provided")
+	}
+	data, err := ioutil.ReadFile(pkeyFile)
+	if err != nil {
+		return "", err
+	}
+	block, _ := pem.Decode(data)
+	pkey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+	if err != nil {
+		return "", err
+	}
+	pubkey, err := ssh.NewPublicKey(pkey.Public())
+	if err != nil {
+		return "", err
+	}
+	f, err := ioutil.TempFile("", "gce_pubkey")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	_, err = f.Write(ssh.MarshalAuthorizedKey(pubkey))
+	return f.Name(), err
+}
+
+func (g *GCETarget) Nodename() string {
+	// TODO(rudymathu): fill in nodename
+	return ""
+}
+
+func (g *GCETarget) Serial() io.ReadWriteCloser {
+	return g.serial
+}
+
+func (g *GCETarget) SSHKey() string {
+	return g.opts.SSHKey
+}
+
+func (g *GCETarget) Start(ctx context.Context, _ []bootserver.Image, args []string, _ string) error {
+	return nil
+}
+
+func (g *GCETarget) Stop(context.Context) error {
+	return g.serial.Close()
+}
+
+func (g *GCETarget) Wait(context.Context) error {
+	return ErrUnimplemented
+}
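
A rough, hypothetical sketch (not from the change) of how a caller might consume the serial stream exposed by GCETarget.Serial(), for example to wait for a boot marker before proceeding. The package name and the marker string are placeholders:

    // Package gceconsole is a hypothetical helper; it only illustrates that
    // GCETarget.Serial() is an io.ReadWriteCloser suitable for line scanning.
    package gceconsole

    import (
            "bufio"
            "fmt"
            "strings"

            "go.fuchsia.dev/fuchsia/tools/botanist/target"
    )

    func waitForBoot(t *target.GCETarget) error {
            scanner := bufio.NewScanner(t.Serial())
            for scanner.Scan() {
                    line := scanner.Text()
                    fmt.Println(line)
                    if strings.Contains(line, "welcome to Zircon") { // placeholder marker
                            return nil
                    }
            }
            return scanner.Err()
    }
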
diff --git a/tools/botanist/target/gce_test.go b/tools/botanist/target/gce_test.go
new file mode 100644
index 0000000..9e5fa5d
--- /dev/null
+++ b/tools/botanist/target/gce_test.go
@@ -0,0 +1,63 @@
+// Copyright 2020 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package target
+
+import (
+	"crypto/x509"
+	"encoding/pem"
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"golang.org/x/crypto/ssh"
+)
+
+func TestGeneratePrivateKey(t *testing.T) {
+	path, err := generatePrivateKey()
+	if err != nil {
+		t.Fatalf("generatePrivateKey() failed: got %s, want <nil> error", err)
+	}
+	defer os.Remove(path)
+
+	// Load the pkey and ensure that it's in the right format.
+	b, err := ioutil.ReadFile(path)
+	if err != nil {
+		t.Fatalf("ReadFile(%s) failed: got %s, want <nil> error", path, err)
+	}
+	block, _ := pem.Decode(b)
+	pkey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+	if err != nil {
+		t.Fatalf("x509.ParsePKCS1PrivateKey(bytes) failed: got %s, want <nil> error", err)
+	}
+	if err := pkey.Validate(); err != nil {
+		t.Errorf("pkey.Validate() failed: got %s, want <nil> error", err)
+	}
+}
+
+func TestGeneratePublicKey(t *testing.T) {
+	pkeyPath, err := generatePrivateKey()
+	if err != nil {
+		t.Fatalf("generatePrivateKey() failed: got %s, want <nil> error", err)
+	}
+	defer os.Remove(pkeyPath)
+	path, err := generatePublicKey(pkeyPath)
+	if err != nil {
+		t.Fatalf("generatePublicKey(%s) failed: got %s, want <nil> error", pkeyPath, err)
+	}
+	defer os.Remove(path)
+
+	// Load the public key and ensure that it's in the right format.
+	b, err := ioutil.ReadFile(path)
+	if err != nil {
+		t.Fatalf("ReadFile(%s) failed: got %s, want <nil> error", path, err)
+	}
+	key, _, _, _, err := ssh.ParseAuthorizedKey(b)
+	if err != nil {
+		t.Fatalf("ssh.ParseAuthorizedKey(pubkeyBytes) failed: got %s, want <nil> error", err)
+	}
+	if key.Type() != "ssh-rsa" {
+		t.Errorf("key %s has wrong type: got %s, want ssh-rsa", key, key.Type())
+	}
+}
diff --git a/tools/check-licenses/cmd/main.go b/tools/check-licenses/cmd/main.go
index 1ed9bb9..6f69f60 100644
--- a/tools/check-licenses/cmd/main.go
+++ b/tools/check-licenses/cmd/main.go
@@ -18,8 +18,6 @@
 	checklicenses "go.fuchsia.dev/fuchsia/tools/check-licenses"
 )
 
-var config checklicenses.Config
-
 var (
 	configFile = flag.String("config_file", "tools/check-licenses/config/config.json", "Location of config.json.")
 
@@ -69,7 +67,8 @@
 		log.SetOutput(ioutil.Discard)
 	}
 
-	if err := config.Init(*configFile); err != nil {
+	config, err := checklicenses.NewConfig(*configFile)
+	if err != nil {
 		return fmt.Errorf("failed to initialize config: %s", err)
 	}
 
@@ -144,7 +143,7 @@
 		//config.Target = *target
 	}
 
-	if err := checklicenses.Walk(context.Background(), &config); err != nil {
+	if err := checklicenses.Walk(context.Background(), config); err != nil {
 		return fmt.Errorf("failed to analyze the given directory: %v", err)
 	}
 	return nil
diff --git a/tools/check-licenses/cmd/main_test.go b/tools/check-licenses/cmd/main_test.go
index bbc8964..791681e 100644
--- a/tools/check-licenses/cmd/main_test.go
+++ b/tools/check-licenses/cmd/main_test.go
@@ -18,8 +18,8 @@
 	if err := ioutil.WriteFile(path, []byte(json), 0o600); err != nil {
 		t.Errorf("%v(): got %v", t.Name(), err)
 	}
-	var config checklicenses.Config
-	if err := config.Init(path); err != nil {
+	_, err := checklicenses.NewConfig(path)
+	if err != nil {
 		t.Errorf("%v(): got %v", t.Name(), err)
 	}
 }
diff --git a/tools/check-licenses/config.go b/tools/check-licenses/config.go
index d194070..9dbed05 100644
--- a/tools/check-licenses/config.go
+++ b/tools/check-licenses/config.go
@@ -48,16 +48,18 @@
 // Init populates Config object with values found in the json config file.
 //
 // Both SkipFiles and SingleLicenseFiles are lowered.
-func (c *Config) Init(path string) error {
+func NewConfig(path string) (*Config, error) {
+	c := &Config{}
+
 	f, err := os.Open(path)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	defer f.Close()
 	d := json.NewDecoder(f)
 	d.DisallowUnknownFields()
 	if err = d.Decode(c); err != nil {
-		return err
+		return nil, err
 	}
 	for i := range c.SingleLicenseFiles {
 		c.SingleLicenseFiles[i] = strings.ToLower(c.SingleLicenseFiles[i])
@@ -69,7 +71,7 @@
 		c.BaseDir = "."
 	}
 	if c.Target != "all" {
-		return errors.New("target must be \"all\"")
+		return nil, errors.New("target must be \"all\"")
 	}
-	return nil
+	return c, nil
 }
diff --git a/tools/check-licenses/config/config.json b/tools/check-licenses/config/config.json
index 551fde7..fb67308 100644
--- a/tools/check-licenses/config/config.json
+++ b/tools/check-licenses/config/config.json
@@ -195,6 +195,10 @@
       "licenseLocation": "repo/apache-2.0.txt"
     },
     {
+      "projectRoot": "third_party/openthread/openthread-tmp/third_party/mbedtls",
+      "licenseLocation": "repo/apache-2.0.txt"
+    },
+    {
       "projectRoot": "third_party/zlib",
       "licenseLocation": "README"
     },
diff --git a/tools/check-licenses/config_test.go b/tools/check-licenses/config_test.go
index 8dcf024..f3a4f15 100644
--- a/tools/check-licenses/config_test.go
+++ b/tools/check-licenses/config_test.go
@@ -13,15 +13,15 @@
 
 var testDataDir = flag.String("test_data_dir", "", "Path to test data; only used in GN build")
 
-func TestConfigInit(t *testing.T) {
+func TestConfigNew(t *testing.T) {
 	folder := t.TempDir()
 	path := filepath.Join(folder, "config.json")
 	json := `{"filesRegex":[],"skipFiles":[".gitignore"],"skipDirs":[".git"],"textExtensionList":["go"],"maxReadSize":6144,"separatorWidth":80,"outputFilePrefix":"NOTICE","outputFileExtension":"txt","outputLicenseFile": true,"product":"astro","singleLicenseFiles":["LICENSE"],"licensePatternDir":"golden/","baseDir":".","target":"all","logLevel":"verbose", "customProjectLicenses": [{"projectRoot": "test", "licenseLocation": "test"}], "exitOnUnlicensedFiles": false}`
 	if err := ioutil.WriteFile(path, []byte(json), 0o600); err != nil {
 		t.Errorf("%v(): got %v", t.Name(), err)
 	}
-	var config Config
-	if err := config.Init(path); err != nil {
+	config, err := NewConfig(path)
+	if err != nil {
 		t.Errorf("%v(): got %v", t.Name(), err)
 	}
 	want := "."
@@ -30,10 +30,10 @@
 	}
 }
 
-func TestDefaultConfig(t *testing.T) {
-	config := Config{}
+func TestConfigDefault(t *testing.T) {
 	p := filepath.Join(*testDataDir, "config", "config.json")
-	if err := config.Init(p); err != nil {
+	_, err := NewConfig(p)
+	if err != nil {
 		t.Errorf("%v(): got %v", t.Name(), err)
 	}
 }
diff --git a/tools/check-licenses/file_test.go b/tools/check-licenses/file_test.go
index cbd432a..5ea17be 100644
--- a/tools/check-licenses/file_test.go
+++ b/tools/check-licenses/file_test.go
@@ -15,7 +15,7 @@
 )
 
 // NewFile(path) should successfully return a file object for normal files.
-func TestCreateNormalFile(t *testing.T) {
+func TestFileCreateNormal(t *testing.T) {
 	tmpfile, err := ioutil.TempFile("", "normal.go")
 	if err != nil {
 		log.Fatal(err)
@@ -29,7 +29,7 @@
 }
 
 // NewFile(path) should successfully return a file object for symlinked files.
-func TestCreateRelativeSymlinkFile(t *testing.T) {
+func TestFileCreateRelativeSymlink(t *testing.T) {
 	// File that we'll be pointing at.
 	target, err := ioutil.TempFile("", "normal.go")
 	if err != nil {
@@ -70,7 +70,7 @@
 
 // NewFile(path) should fail and return an error if the specified path
 // does not exist.
-func TestCreateNonExistentFile(t *testing.T) {
+func TestFileCreateNonExistent(t *testing.T) {
 	// Temporary directory for holding the symlink.
 	tmpdir, err := ioutil.TempDir("", "symdir")
 	if err != nil {
diff --git a/tools/check-licenses/file_tree.go b/tools/check-licenses/file_tree.go
index 6dcd527..a3ae067 100644
--- a/tools/check-licenses/file_tree.go
+++ b/tools/check-licenses/file_tree.go
@@ -34,8 +34,10 @@
 // file.
 func NewFileTree(ctx context.Context, root string, parent *FileTree, config *Config, metrics *Metrics) *FileTree {
 	defer trace.StartRegion(ctx, "NewFileTree").End()
-	var ft FileTree
-	ft.Init()
+	ft := FileTree{
+		Children:           make(map[string]*FileTree),
+		SingleLicenseFiles: make(map[string][]*License),
+	}
 
 	abs, _ := filepath.Abs(root)
 	ft.Name = filepath.Base(abs)
@@ -131,37 +133,32 @@
 	return &ft
 }
 
-func (license_file_tree *FileTree) Init() {
-	license_file_tree.Children = make(map[string]*FileTree)
-	license_file_tree.SingleLicenseFiles = make(map[string][]*License)
-}
-
-func (file_tree *FileTree) propagateProjectLicenses(config *Config) {
+func (ft *FileTree) propagateProjectLicenses(config *Config) {
 	propagate := true
 	for _, dirName := range config.StopLicensePropagation {
-		if file_tree.Name == dirName {
+		if ft.Name == dirName {
 			propagate = false
 			break
 		}
 	}
 
-	if propagate && file_tree.Parent != nil {
-		for key, val := range file_tree.Parent.SingleLicenseFiles {
-			file_tree.SingleLicenseFiles[key] = val
+	if propagate && ft.Parent != nil {
+		for key, val := range ft.Parent.SingleLicenseFiles {
+			ft.SingleLicenseFiles[key] = val
 		}
 	}
 
-	for _, child := range file_tree.Children {
+	for _, child := range ft.Children {
 		child.propagateProjectLicenses(config)
 	}
 }
 
-func (file_tree *FileTree) getSingleLicenseFileIterator() <-chan *FileTree {
+func (ft *FileTree) getSingleLicenseFileIterator() <-chan *FileTree {
 	ch := make(chan *FileTree, 1)
 	go func() {
 		var curr *FileTree
 		var q []*FileTree
-		q = append(q, file_tree)
+		q = append(q, ft)
 		var pos int
 		for len(q) > 0 {
 			pos = len(q) - 1
@@ -181,12 +178,12 @@
 	return ch
 }
 
-func (file_tree *FileTree) getFileIterator() <-chan *File {
+func (ft *FileTree) getFileIterator() <-chan *File {
 	ch := make(chan *File, 1)
 	go func() {
 		var curr *FileTree
 		var q []*FileTree
-		q = append(q, file_tree)
+		q = append(q, ft)
 		var pos int
 		for len(q) > 0 {
 			pos = len(q) - 1
@@ -209,16 +206,16 @@
 // Maps are used in FileTree to prevent duplicate values (since go doesn't have sets).
 // However, Maps make the final JSON object difficult to read.
 // Define a custom MarshalJSON function to convert the internal Maps into slices.
-func (file_tree *FileTree) MarshalJSON() ([]byte, error) {
+func (ft *FileTree) MarshalJSON() ([]byte, error) {
 	type Alias FileTree
 	childrenList := []*FileTree{}
 	fileList := []string{}
 
-	for _, c := range file_tree.Children {
+	for _, c := range ft.Children {
 		childrenList = append(childrenList, c)
 	}
 
-	for _, f := range file_tree.Files {
+	for _, f := range ft.Files {
 		fileList = append(fileList, f.Name)
 	}
 
@@ -226,13 +223,13 @@
 		*Alias
 		Children []*FileTree `json:"children"`
 	}{
-		Alias:    (*Alias)(file_tree),
+		Alias:    (*Alias)(ft),
 		Children: childrenList,
 	})
 }
 
-func (file_tree *FileTree) saveTreeState(filename string) error {
-	jsonString, err := json.MarshalIndent(file_tree, "", " ")
+func (ft *FileTree) saveTreeState(filename string) error {
+	jsonString, err := json.MarshalIndent(ft, "", " ")
 	if err != nil {
 		return fmt.Errorf("error marshalling the file tree: %v\n", err)
 	}
@@ -249,25 +246,25 @@
 	return nil
 }
 
-func (file_tree *FileTree) Equal(other *FileTree) bool {
-	if file_tree.Name != other.Name {
+func (ft *FileTree) Equal(other *FileTree) bool {
+	if ft.Name != other.Name {
 		return false
 	}
-	if file_tree.Path != other.Path {
+	if ft.Path != other.Path {
 		return false
 	}
-	if file_tree.Parent != other.Parent {
+	if ft.Parent != other.Parent {
 		return false
 	}
-	if file_tree.StrictAnalysis != other.StrictAnalysis {
+	if ft.StrictAnalysis != other.StrictAnalysis {
 		return false
 	}
 
-	if len(file_tree.SingleLicenseFiles) != len(other.SingleLicenseFiles) {
+	if len(ft.SingleLicenseFiles) != len(other.SingleLicenseFiles) {
 		return false
 	}
-	for k := range file_tree.SingleLicenseFiles {
-		left := file_tree.SingleLicenseFiles[k]
+	for k := range ft.SingleLicenseFiles {
+		left := ft.SingleLicenseFiles[k]
 		right := other.SingleLicenseFiles[k]
 		if len(left) != len(right) {
 			return false
@@ -279,20 +276,20 @@
 		}
 	}
 
-	if len(file_tree.Files) != len(other.Files) {
+	if len(ft.Files) != len(other.Files) {
 		return false
 	}
-	for i := range file_tree.Files {
-		if !file_tree.Files[i].Equal(other.Files[i]) {
+	for i := range ft.Files {
+		if !ft.Files[i].Equal(other.Files[i]) {
 			return false
 		}
 	}
 
-	if len(file_tree.Children) != len(other.Children) {
+	if len(ft.Children) != len(other.Children) {
 		return false
 	}
-	for k := range file_tree.Children {
-		if file_tree.Children[k] != other.Children[k] {
+	for k := range ft.Children {
+		if ft.Children[k] != other.Children[k] {
 			return false
 		}
 	}
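
The receiver rename above is mechanical; the behaviour of the two iterator helpers is unchanged: each starts a goroutine that walks the tree and feeds results into a channel, so in-package callers simply range over them. A short sketch, assuming a *Config obtained from NewConfig as elsewhere in this change:

    // In-package sketch: getFileIterator and getSingleLicenseFileIterator are
    // unexported, so this only compiles inside package checklicenses (e.g. in a test).
    ft := NewFileTree(context.Background(), config.BaseDir, nil, config, NewMetrics())
    for f := range ft.getFileIterator() {
            fmt.Println(f.Name)
    }
    for dir := range ft.getSingleLicenseFileIterator() {
            fmt.Println(dir.Path)
    }
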
diff --git a/tools/check-licenses/file_tree_test.go b/tools/check-licenses/file_tree_test.go
index f0dad61..80d2b35 100644
--- a/tools/check-licenses/file_tree_test.go
+++ b/tools/check-licenses/file_tree_test.go
@@ -6,45 +6,52 @@
 
 import (
 	"context"
-	"os"
 	"path/filepath"
 	"testing"
 )
 
-func TestFileTreeNew(t *testing.T) {
-	baseDir := filepath.Join(*testDataDir, "filetree", "simple")
-	configPath := filepath.Join(*testDataDir, "filetree", "simple.json")
-	testFile := filepath.Join(*testDataDir, "filetree", "simple", "test.py")
+// NewFileTree(empty) should produce a filetree object that correctly
+// represents an empty directory.
+func TestFileTreeCreateEmpty(t *testing.T) {
+	root, config := setupFileTreeTestDir("empty", t)
 
-	config := Config{}
-	if err := config.Init(configPath); err != nil {
-		t.Fatal(err)
-	}
-	config.BaseDir = baseDir
+	got := NewFileTree(context.Background(), root, nil, config, NewMetrics())
 
-	metrics := Metrics{}
-	metrics.Init()
-	got := NewFileTree(context.Background(), config.BaseDir, nil, &config, &metrics)
-
-	cwd, err := os.Getwd()
-	if err != nil {
-		t.Error(err)
-	}
 	want := &FileTree{
-		Name: "simple",
-		Path: filepath.Join(cwd, baseDir),
+		Name:  "empty",
+		Path:  root,
+		Files: []*File{},
 	}
-	f, err := NewFile(testFile, got)
-	if err != nil {
-		t.Error(err)
-	}
-	want.Files = append(want.Files, f)
 
 	if !got.Equal(want) {
 		t.Errorf("%v(): got %v, want %v", t.Name(), got, want)
 	}
 }
 
+// NewFileTree(simple) should produce a filetree object that correctly
+// represents the simple testdata directory.
+func TestFileTreeCreateSimple(t *testing.T) {
+	root, config := setupFileTreeTestDir("simple", t)
+
+	got := NewFileTree(context.Background(), root, nil, config, NewMetrics())
+
+	f, err := NewFile(filepath.Join(root, "test.py"), got)
+	if err != nil {
+		t.Error(err)
+	}
+	want := &FileTree{
+		Name:  "simple",
+		Path:  root,
+		Files: []*File{f},
+	}
+
+	if !got.Equal(want) {
+		t.Errorf("%v(): got %v, want %v", t.Name(), got, want)
+	}
+}
+
+// hasLowerPrefix must return true if the given filepath has a string prefix
+// in the predefined list.
 func TestFileTreeHasLowerPrefix(t *testing.T) {
 	name := "LICENSE-THIRD-PARTY"
 	singleLicenseFiles := []string{"license", "readme"}
@@ -52,3 +59,17 @@
 		t.Errorf("%v: %v is not a single license file", t.Name(), name)
 	}
 }
+
+func setupFileTreeTestDir(name string, t *testing.T) (string, *Config) {
+	configPath := filepath.Join(*testDataDir, "filetree", name+".json")
+	baseDir, err := filepath.Abs(filepath.Join(*testDataDir, "filetree", name))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	config, err := NewConfig(configPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return baseDir, config
+}
diff --git a/tools/check-licenses/golden/examples/apache_llvm_full.txt b/tools/check-licenses/golden/examples/exception/apache_llvm/apache_llvm_full.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/apache_llvm_full.txt
rename to tools/check-licenses/golden/examples/exception/apache_llvm/apache_llvm_full.txt
diff --git a/tools/check-licenses/golden/examples/llvm_project2.txt b/tools/check-licenses/golden/examples/exception/apache_llvm/llvm_project.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/llvm_project2.txt
rename to tools/check-licenses/golden/examples/exception/apache_llvm/llvm_project.txt
diff --git a/tools/check-licenses/golden/examples/ffmpeg.txt b/tools/check-licenses/golden/examples/exception/ffmpeg/ffmpeg.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/ffmpeg.txt
rename to tools/check-licenses/golden/examples/exception/ffmpeg/ffmpeg.txt
diff --git a/tools/check-licenses/golden/examples/rust_project.txt b/tools/check-licenses/golden/examples/exception/rust/rust_project.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/rust_project.txt
rename to tools/check-licenses/golden/examples/exception/rust/rust_project.txt
diff --git a/tools/check-licenses/golden/examples/ofl.txt b/tools/check-licenses/golden/examples/exception/sil/ofl.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/ofl.txt
rename to tools/check-licenses/golden/examples/exception/sil/ofl.txt
diff --git a/tools/check-licenses/golden/examples/sil_open_font.txt b/tools/check-licenses/golden/examples/exception/sil/sil_open_font.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/sil_open_font.txt
rename to tools/check-licenses/golden/examples/exception/sil/sil_open_font.txt
diff --git a/tools/check-licenses/golden/examples/nordic_freertos.txt b/tools/check-licenses/golden/examples/nordic_freertos.txt
deleted file mode 100644
index b91fed6..0000000
--- a/tools/check-licenses/golden/examples/nordic_freertos.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-FreeRTOS license
-================
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-Nordic Semiconductor License
-=============================
-
-Copyright (c) 2010 - 2019, Nordic Semiconductor ASA
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-
-2. Redistributions in binary form, except as embedded into a Nordic
-   Semiconductor ASA integrated circuit in a product or a software update for
-   such product, must reproduce the above copyright notice, this list of
-   conditions and the following disclaimer in the documentation and/or other
-   materials provided with the distribution.
-
-3. Neither the name of Nordic Semiconductor ASA nor the names of its
-   contributors may be used to endorse or promote products derived from this
-   software without specific prior written permission.
-
-4. This software, with or without modification, must only be used with a
-   Nordic Semiconductor ASA integrated circuit.
-
-5. Any software provided in binary form under this license must not be reverse
-   engineered, decompiled, modified and/or disassembled.
-
-THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
-OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
-GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
-OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/tools/check-licenses/golden/examples/apache.txt b/tools/check-licenses/golden/examples/notice/apache/apache.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/apache.txt
rename to tools/check-licenses/golden/examples/notice/apache/apache.txt
diff --git a/tools/check-licenses/golden/examples/apache_full.txt b/tools/check-licenses/golden/examples/notice/apache/apache_full.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/apache_full.txt
rename to tools/check-licenses/golden/examples/notice/apache/apache_full.txt
diff --git a/tools/check-licenses/golden/examples/mbedtls.txt b/tools/check-licenses/golden/examples/notice/apache/mbedtls.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/mbedtls.txt
rename to tools/check-licenses/golden/examples/notice/apache/mbedtls.txt
diff --git a/tools/check-licenses/golden/examples/rsa.txt b/tools/check-licenses/golden/examples/notice/apache/rsa.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/rsa.txt
rename to tools/check-licenses/golden/examples/notice/apache/rsa.txt
diff --git a/tools/check-licenses/golden/examples/rust.txt b/tools/check-licenses/golden/examples/notice/apache/rust.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/rust.txt
rename to tools/check-licenses/golden/examples/notice/apache/rust.txt
diff --git a/tools/check-licenses/golden/examples/acpica.txt b/tools/check-licenses/golden/examples/notice/bsd/acpica.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/acpica.txt
rename to tools/check-licenses/golden/examples/notice/bsd/acpica.txt
diff --git a/tools/check-licenses/golden/examples/android.txt b/tools/check-licenses/golden/examples/notice/bsd/android.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/android.txt
rename to tools/check-licenses/golden/examples/notice/bsd/android.txt
diff --git a/tools/check-licenses/golden/examples/bsd.txt b/tools/check-licenses/golden/examples/notice/bsd/bsd.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/bsd.txt
rename to tools/check-licenses/golden/examples/notice/bsd/bsd.txt
diff --git a/tools/check-licenses/golden/examples/bsd2.txt b/tools/check-licenses/golden/examples/notice/bsd/bsd2.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/bsd2.txt
rename to tools/check-licenses/golden/examples/notice/bsd/bsd2.txt
diff --git a/tools/check-licenses/golden/examples/bsd3.txt b/tools/check-licenses/golden/examples/notice/bsd/bsd3.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/bsd3.txt
rename to tools/check-licenses/golden/examples/notice/bsd/bsd3.txt
diff --git a/tools/check-licenses/golden/examples/chromium.txt b/tools/check-licenses/golden/examples/notice/bsd/chromium.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/chromium.txt
rename to tools/check-licenses/golden/examples/notice/bsd/chromium.txt
diff --git a/tools/check-licenses/golden/examples/chromium2.txt b/tools/check-licenses/golden/examples/notice/bsd/chromium2.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/chromium2.txt
rename to tools/check-licenses/golden/examples/notice/bsd/chromium2.txt
diff --git a/tools/check-licenses/golden/examples/cmake_project.txt b/tools/check-licenses/golden/examples/notice/bsd/cmake_project.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/cmake_project.txt
rename to tools/check-licenses/golden/examples/notice/bsd/cmake_project.txt
diff --git a/tools/check-licenses/golden/examples/dart.txt b/tools/check-licenses/golden/examples/notice/bsd/dart.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/dart.txt
rename to tools/check-licenses/golden/examples/notice/bsd/dart.txt
diff --git a/tools/check-licenses/golden/examples/edk2.txt b/tools/check-licenses/golden/examples/notice/bsd/edk2.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/edk2.txt
rename to tools/check-licenses/golden/examples/notice/bsd/edk2.txt
diff --git a/tools/check-licenses/golden/examples/flutter.txt b/tools/check-licenses/golden/examples/notice/bsd/flutter.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/flutter.txt
rename to tools/check-licenses/golden/examples/notice/bsd/flutter.txt
diff --git a/tools/check-licenses/golden/examples/fuchsia2.txt b/tools/check-licenses/golden/examples/notice/bsd/fuchsia.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/fuchsia2.txt
rename to tools/check-licenses/golden/examples/notice/bsd/fuchsia.txt
diff --git a/tools/check-licenses/golden/examples/fuchsia3.txt b/tools/check-licenses/golden/examples/notice/bsd/fuchsia2.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/fuchsia3.txt
rename to tools/check-licenses/golden/examples/notice/bsd/fuchsia2.txt
diff --git a/tools/check-licenses/golden/examples/fuchsia_bsd.txt b/tools/check-licenses/golden/examples/notice/bsd/fuchsia_bsd.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/fuchsia_bsd.txt
rename to tools/check-licenses/golden/examples/notice/bsd/fuchsia_bsd.txt
diff --git a/tools/check-licenses/golden/examples/fuchsia_bsd2.txt b/tools/check-licenses/golden/examples/notice/bsd/fuchsia_bsd2.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/fuchsia_bsd2.txt
rename to tools/check-licenses/golden/examples/notice/bsd/fuchsia_bsd2.txt
diff --git a/tools/check-licenses/golden/examples/go_authors.txt b/tools/check-licenses/golden/examples/notice/bsd/go_authors.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/go_authors.txt
rename to tools/check-licenses/golden/examples/notice/bsd/go_authors.txt
diff --git a/tools/check-licenses/golden/examples/go_authors2.txt b/tools/check-licenses/golden/examples/notice/bsd/go_authors2.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/go_authors2.txt
rename to tools/check-licenses/golden/examples/notice/bsd/go_authors2.txt
diff --git a/tools/check-licenses/golden/examples/google.txt b/tools/check-licenses/golden/examples/notice/bsd/google.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/google.txt
rename to tools/check-licenses/golden/examples/notice/bsd/google.txt
diff --git a/tools/check-licenses/golden/examples/happy_bunny.txt b/tools/check-licenses/golden/examples/notice/bsd/happy_bunny.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/happy_bunny.txt
rename to tools/check-licenses/golden/examples/notice/bsd/happy_bunny.txt
diff --git a/tools/check-licenses/golden/examples/intel.txt b/tools/check-licenses/golden/examples/notice/bsd/intel.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/intel.txt
rename to tools/check-licenses/golden/examples/notice/bsd/intel.txt
diff --git a/tools/check-licenses/golden/examples/libavcodec.txt b/tools/check-licenses/golden/examples/notice/bsd/libavcodec.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/libavcodec.txt
rename to tools/check-licenses/golden/examples/notice/bsd/libavcodec.txt
diff --git a/tools/check-licenses/golden/examples/lss_full.txt b/tools/check-licenses/golden/examples/notice/bsd/lss_full.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/lss_full.txt
rename to tools/check-licenses/golden/examples/notice/bsd/lss_full.txt
diff --git a/tools/check-licenses/golden/examples/lz4.txt b/tools/check-licenses/golden/examples/notice/bsd/lz4.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/lz4.txt
rename to tools/check-licenses/golden/examples/notice/bsd/lz4.txt
diff --git a/tools/check-licenses/golden/examples/markupsafe.txt b/tools/check-licenses/golden/examples/notice/bsd/markupsafe.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/markupsafe.txt
rename to tools/check-licenses/golden/examples/notice/bsd/markupsafe.txt
diff --git a/tools/check-licenses/golden/examples/nacl.txt b/tools/check-licenses/golden/examples/notice/bsd/nacl.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/nacl.txt
rename to tools/check-licenses/golden/examples/notice/bsd/nacl.txt
diff --git a/tools/check-licenses/golden/examples/openssl_project.txt b/tools/check-licenses/golden/examples/notice/bsd/openssl_project.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/openssl_project.txt
rename to tools/check-licenses/golden/examples/notice/bsd/openssl_project.txt
diff --git a/tools/check-licenses/golden/examples/university_of_california.txt b/tools/check-licenses/golden/examples/notice/bsd/university_of_california.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/university_of_california.txt
rename to tools/check-licenses/golden/examples/notice/bsd/university_of_california.txt
diff --git a/tools/check-licenses/golden/examples/university_of_california2.txt b/tools/check-licenses/golden/examples/notice/bsd/university_of_california2.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/university_of_california2.txt
rename to tools/check-licenses/golden/examples/notice/bsd/university_of_california2.txt
diff --git a/tools/check-licenses/golden/examples/freetype.txt b/tools/check-licenses/golden/examples/notice/freetype/freetype.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/freetype.txt
rename to tools/check-licenses/golden/examples/notice/freetype/freetype.txt
diff --git a/tools/check-licenses/golden/examples/freetype2.txt b/tools/check-licenses/golden/examples/notice/freetype/freetype2.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/freetype2.txt
rename to tools/check-licenses/golden/examples/notice/freetype/freetype2.txt
diff --git a/tools/check-licenses/golden/examples/boring_ssl.txt b/tools/check-licenses/golden/examples/notice/isc/boring_ssl.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/boring_ssl.txt
rename to tools/check-licenses/golden/examples/notice/isc/boring_ssl.txt
diff --git a/tools/check-licenses/golden/examples/fuchsia.txt b/tools/check-licenses/golden/examples/notice/isc/fuchsia.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/fuchsia.txt
rename to tools/check-licenses/golden/examples/notice/isc/fuchsia.txt
diff --git a/tools/check-licenses/golden/examples/icu.txt b/tools/check-licenses/golden/examples/notice/isc/icu.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/icu.txt
rename to tools/check-licenses/golden/examples/notice/isc/icu.txt
diff --git a/tools/check-licenses/golden/examples/isc.txt b/tools/check-licenses/golden/examples/notice/isc/isc.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/isc.txt
rename to tools/check-licenses/golden/examples/notice/isc/isc.txt
diff --git a/tools/check-licenses/golden/examples/digital_equipment.txt b/tools/check-licenses/golden/examples/notice/mit/digital_equipment.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/digital_equipment.txt
rename to tools/check-licenses/golden/examples/notice/mit/digital_equipment.txt
diff --git a/tools/check-licenses/golden/examples/fuchsia4.txt b/tools/check-licenses/golden/examples/notice/mit/fuchsia.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/fuchsia4.txt
rename to tools/check-licenses/golden/examples/notice/mit/fuchsia.txt
diff --git a/tools/check-licenses/golden/examples/fuchsia_mit.txt b/tools/check-licenses/golden/examples/notice/mit/fuchsia_mit.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/fuchsia_mit.txt
rename to tools/check-licenses/golden/examples/notice/mit/fuchsia_mit.txt
diff --git a/tools/check-licenses/golden/examples/icu2.txt b/tools/check-licenses/golden/examples/notice/mit/icu.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/icu2.txt
rename to tools/check-licenses/golden/examples/notice/mit/icu.txt
diff --git a/tools/check-licenses/golden/examples/imgtec-pvr-rgx-km.txt b/tools/check-licenses/golden/examples/notice/mit/imgtec-pvr-rgx-km.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/imgtec-pvr-rgx-km.txt
rename to tools/check-licenses/golden/examples/notice/mit/imgtec-pvr-rgx-km.txt
diff --git a/tools/check-licenses/golden/examples/jq_project.txt b/tools/check-licenses/golden/examples/notice/mit/jq_project.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/jq_project.txt
rename to tools/check-licenses/golden/examples/notice/mit/jq_project.txt
diff --git a/tools/check-licenses/golden/examples/mesa_full.txt b/tools/check-licenses/golden/examples/notice/mit/mesa_full.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/mesa_full.txt
rename to tools/check-licenses/golden/examples/notice/mit/mesa_full.txt
diff --git a/tools/check-licenses/golden/examples/microsoft.txt b/tools/check-licenses/golden/examples/notice/mit/microsoft.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/microsoft.txt
rename to tools/check-licenses/golden/examples/notice/mit/microsoft.txt
diff --git a/tools/check-licenses/golden/examples/mit.txt b/tools/check-licenses/golden/examples/notice/mit/mit.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/mit.txt
rename to tools/check-licenses/golden/examples/notice/mit/mit.txt
diff --git a/tools/check-licenses/golden/examples/mit2.txt b/tools/check-licenses/golden/examples/notice/mit/mit2.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/mit2.txt
rename to tools/check-licenses/golden/examples/notice/mit/mit2.txt
diff --git a/tools/check-licenses/golden/examples/mit3.txt b/tools/check-licenses/golden/examples/notice/mit/mit3.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/mit3.txt
rename to tools/check-licenses/golden/examples/notice/mit/mit3.txt
diff --git a/tools/check-licenses/golden/examples/mit4.txt b/tools/check-licenses/golden/examples/notice/mit/mit4.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/mit4.txt
rename to tools/check-licenses/golden/examples/notice/mit/mit4.txt
diff --git a/tools/check-licenses/golden/examples/musl_full.txt b/tools/check-licenses/golden/examples/notice/mit/musl_full.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/musl_full.txt
rename to tools/check-licenses/golden/examples/notice/mit/musl_full.txt
diff --git a/tools/check-licenses/golden/examples/ncd.txt b/tools/check-licenses/golden/examples/notice/mit/ncd.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/ncd.txt
rename to tools/check-licenses/golden/examples/notice/mit/ncd.txt
diff --git a/tools/check-licenses/golden/examples/oracle.txt b/tools/check-licenses/golden/examples/notice/mit/oracle.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/oracle.txt
rename to tools/check-licenses/golden/examples/notice/mit/oracle.txt
diff --git a/tools/check-licenses/golden/examples/packard.txt b/tools/check-licenses/golden/examples/notice/mit/packard.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/packard.txt
rename to tools/check-licenses/golden/examples/notice/mit/packard.txt
diff --git a/tools/check-licenses/golden/examples/rarick.txt b/tools/check-licenses/golden/examples/notice/mit/rarick.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/rarick.txt
rename to tools/check-licenses/golden/examples/notice/mit/rarick.txt
diff --git a/tools/check-licenses/golden/examples/sgi.txt b/tools/check-licenses/golden/examples/notice/mit/sgi.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/sgi.txt
rename to tools/check-licenses/golden/examples/notice/mit/sgi.txt
diff --git a/tools/check-licenses/golden/examples/unicode_license_full.txt b/tools/check-licenses/golden/examples/notice/mit/unicode_license_full.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/unicode_license_full.txt
rename to tools/check-licenses/golden/examples/notice/mit/unicode_license_full.txt
diff --git a/tools/check-licenses/golden/examples/vulkan.txt b/tools/check-licenses/golden/examples/notice/mit/vulkan.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/vulkan.txt
rename to tools/check-licenses/golden/examples/notice/mit/vulkan.txt
diff --git a/tools/check-licenses/golden/examples/vulkan2.txt b/tools/check-licenses/golden/examples/notice/mit/vulkan2.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/vulkan2.txt
rename to tools/check-licenses/golden/examples/notice/mit/vulkan2.txt
diff --git a/tools/check-licenses/golden/examples/xfree86.txt b/tools/check-licenses/golden/examples/notice/mit/xfree86.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/xfree86.txt
rename to tools/check-licenses/golden/examples/notice/mit/xfree86.txt
diff --git a/tools/check-licenses/golden/examples/yoran_heling.txt b/tools/check-licenses/golden/examples/notice/mit/yoran_heling.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/yoran_heling.txt
rename to tools/check-licenses/golden/examples/notice/mit/yoran_heling.txt
diff --git a/tools/check-licenses/golden/examples/llvm_project.txt b/tools/check-licenses/golden/examples/notice/ncsa/llvm_project.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/llvm_project.txt
rename to tools/check-licenses/golden/examples/notice/ncsa/llvm_project.txt
diff --git a/tools/check-licenses/golden/examples/university_of_illinois.txt b/tools/check-licenses/golden/examples/notice/ncsa/university_of_illinois.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/university_of_illinois.txt
rename to tools/check-licenses/golden/examples/notice/ncsa/university_of_illinois.txt
diff --git a/tools/check-licenses/golden/examples/aac_full.txt b/tools/check-licenses/golden/examples/notice/other/aac_full.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/aac_full.txt
rename to tools/check-licenses/golden/examples/notice/other/aac_full.txt
diff --git a/tools/check-licenses/golden/examples/flite.txt b/tools/check-licenses/golden/examples/notice/other/flite.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/flite.txt
rename to tools/check-licenses/golden/examples/notice/other/flite.txt
diff --git a/tools/check-licenses/golden/examples/icu3.txt b/tools/check-licenses/golden/examples/notice/other/icu.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/icu3.txt
rename to tools/check-licenses/golden/examples/notice/other/icu.txt
diff --git a/tools/check-licenses/golden/examples/original_sslea_license.txt b/tools/check-licenses/golden/examples/notice/other/original_sslea_license.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/original_sslea_license.txt
rename to tools/check-licenses/golden/examples/notice/other/original_sslea_license.txt
diff --git a/tools/check-licenses/golden/examples/pthreads.txt b/tools/check-licenses/golden/examples/notice/other/pthreads.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/pthreads.txt
rename to tools/check-licenses/golden/examples/notice/other/pthreads.txt
diff --git a/tools/check-licenses/golden/examples/readme_ijg.txt b/tools/check-licenses/golden/examples/notice/other/readme_ijg.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/readme_ijg.txt
rename to tools/check-licenses/golden/examples/notice/other/readme_ijg.txt
diff --git a/tools/check-licenses/golden/examples/python_project.txt b/tools/check-licenses/golden/examples/notice/python/python_project.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/python_project.txt
rename to tools/check-licenses/golden/examples/notice/python/python_project.txt
diff --git a/tools/check-licenses/golden/examples/libpng.txt b/tools/check-licenses/golden/examples/notice/zlib/libpng.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/libpng.txt
rename to tools/check-licenses/golden/examples/notice/zlib/libpng.txt
diff --git a/tools/check-licenses/golden/examples/valgrind.txt b/tools/check-licenses/golden/examples/notice/zlib/valgrind.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/valgrind.txt
rename to tools/check-licenses/golden/examples/notice/zlib/valgrind.txt
diff --git a/tools/check-licenses/golden/examples/zlib.txt b/tools/check-licenses/golden/examples/notice/zlib/zlib.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/zlib.txt
rename to tools/check-licenses/golden/examples/notice/zlib/zlib.txt
diff --git a/tools/check-licenses/golden/examples/zlib_full.txt b/tools/check-licenses/golden/examples/notice/zlib/zlib_full.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/zlib_full.txt
rename to tools/check-licenses/golden/examples/notice/zlib/zlib_full.txt
diff --git a/tools/check-licenses/golden/examples/arm.txt b/tools/check-licenses/golden/examples/restricted/gpl/arm.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/arm.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/arm.txt
diff --git a/tools/check-licenses/golden/examples/ffmpeg_project.txt b/tools/check-licenses/golden/examples/restricted/gpl/ffmpeg_project.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/ffmpeg_project.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/ffmpeg_project.txt
diff --git a/tools/check-licenses/golden/examples/gcc.txt b/tools/check-licenses/golden/examples/restricted/gpl/gcc.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/gcc.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/gcc.txt
diff --git a/tools/check-licenses/golden/examples/gcc2.txt b/tools/check-licenses/golden/examples/restricted/gpl/gcc2.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/gcc2.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/gcc2.txt
diff --git a/tools/check-licenses/golden/examples/gcc3.txt b/tools/check-licenses/golden/examples/restricted/gpl/gcc3.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/gcc3.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/gcc3.txt
diff --git a/tools/check-licenses/golden/examples/gnu.txt b/tools/check-licenses/golden/examples/restricted/gpl/gnu.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/gnu.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/gnu.txt
diff --git a/tools/check-licenses/golden/examples/gnu2.txt b/tools/check-licenses/golden/examples/restricted/gpl/gnu2.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/gnu2.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/gnu2.txt
diff --git a/tools/check-licenses/golden/examples/gnu3.txt b/tools/check-licenses/golden/examples/restricted/gpl/gnu3.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/gnu3.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/gnu3.txt
diff --git a/tools/check-licenses/golden/examples/gnu4.txt b/tools/check-licenses/golden/examples/restricted/gpl/gnu4.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/gnu4.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/gnu4.txt
diff --git a/tools/check-licenses/golden/examples/gnu5.txt b/tools/check-licenses/golden/examples/restricted/gpl/gnu5.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/gnu5.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/gnu5.txt
diff --git a/tools/check-licenses/golden/examples/gnu6.txt b/tools/check-licenses/golden/examples/restricted/gpl/gnu6.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/gnu6.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/gnu6.txt
diff --git a/tools/check-licenses/golden/examples/gpl.txt b/tools/check-licenses/golden/examples/restricted/gpl/gpl.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/gpl.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/gpl.txt
diff --git a/tools/check-licenses/golden/examples/lgpl_full.txt b/tools/check-licenses/golden/examples/restricted/gpl/lgpl_full.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/lgpl_full.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/lgpl_full.txt
diff --git a/tools/check-licenses/golden/examples/linux_gnu.txt b/tools/check-licenses/golden/examples/restricted/gpl/linux_gnu.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/linux_gnu.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/linux_gnu.txt
diff --git a/tools/check-licenses/golden/examples/redhat.txt b/tools/check-licenses/golden/examples/restricted/gpl/redhat.txt
similarity index 100%
rename from tools/check-licenses/golden/examples/redhat.txt
rename to tools/check-licenses/golden/examples/restricted/gpl/redhat.txt
diff --git a/tools/check-licenses/golden/golden_test.go b/tools/check-licenses/golden/golden_test.go
index 01cbc78..bd67986 100644
--- a/tools/check-licenses/golden/golden_test.go
+++ b/tools/check-licenses/golden/golden_test.go
@@ -6,6 +6,7 @@
 
 import (
 	"flag"
+	"fmt"
 	"io/ioutil"
 	"os"
 	"path/filepath"
@@ -24,28 +25,50 @@
 			t.Errorf("%v(%v doesn't exist): got %v, want %v", t.Name(), p, err, nil)
 		}
 	}
-	example_files, err := ioutil.ReadDir(filepath.Join(*testDataDir, "examples"))
+	examplesRoot := filepath.Join(*testDataDir, "examples")
+	exampleFilesPath := []string{}
+	err := filepath.Walk(examplesRoot,
+		func(path string, info os.FileInfo, err error) error {
+			if info.IsDir() {
+				return nil
+			}
+			exampleFilesPath = append(exampleFilesPath, path)
+			return nil
+		})
 	if err != nil {
 		t.Errorf("%v, got %v", t.Name(), err)
 	}
-	pattern_files, err := ioutil.ReadDir(filepath.Join(*testDataDir, "patterns"))
+
+	patternsRoot := filepath.Join(*testDataDir, "patterns")
+	patternFilesPath := []string{}
+	err = filepath.Walk(patternsRoot,
+		func(path string, info os.FileInfo, err error) error {
+			if info.IsDir() {
+				return nil
+			}
+			patternFilesPath = append(patternFilesPath, path)
+			return nil
+		})
 	if err != nil {
 		t.Errorf("%v, got %v", t.Name(), err)
 	}
-	if len(example_files) != len(pattern_files) {
-		t.Errorf("%v, got %v != %v", t.Name(), len(example_files), len(pattern_files))
+
+	if len(exampleFilesPath) != len(patternFilesPath) {
+		t.Errorf("%v, got %v != %v", t.Name(), len(exampleFilesPath), len(patternFilesPath))
 	}
-	for _, pattern_file := range pattern_files {
-		pattern, err := ioutil.ReadFile(filepath.Join(*testDataDir, "patterns", pattern_file.Name()))
+	fmt.Printf("LENGTH %v", len(patternFilesPath))
+	for _, patternFilePath := range patternFilesPath {
+		patternFile, err := ioutil.ReadFile(patternFilePath)
 		if err != nil {
 			t.Errorf("%v, got %v", t.Name(), err)
 		}
-		example_file := strings.TrimSuffix(pattern_file.Name(), filepath.Ext(pattern_file.Name())) + ".txt"
-		example, err := ioutil.ReadFile(filepath.Join(*testDataDir, "examples", example_file))
+		exampleFilePath := strings.Replace(patternFilePath, ".lic", ".txt", -1)
+		exampleFilePath = strings.Replace(exampleFilePath, "patterns", "examples", -1)
+		exampleFile, err := ioutil.ReadFile(exampleFilePath)
 		if err != nil {
 			t.Errorf("%v, got %v", t.Name(), err)
 		}
-		regex := string(pattern)
+		regex := string(patternFile)
 		// Update regex to ignore multiple white spaces, newlines, comments.
 		// But first, trim whitespace away so we don't include unnecessary
 		// comment syntax.
@@ -53,8 +76,8 @@
 		regex = strings.ReplaceAll(regex, "\n", `[\s\\#\*\/]*`)
 		regex = strings.ReplaceAll(regex, " ", `[\s\\#\*\/]*`)
 
-		if !regexp.MustCompile(regex).Match(example) {
-			t.Errorf("%v, %v pattern doesn't match example", t.Name(), pattern_file.Name())
+		if !regexp.MustCompile(regex).Match(exampleFile) {
+			t.Errorf("%v, %v pattern doesn't match example", t.Name(), patternFilePath)
 		}
 	}
 }
diff --git a/tools/check-licenses/golden/patterns/apache_llvm_full.lic b/tools/check-licenses/golden/patterns/exception/apache_llvm/apache_llvm_full.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/apache_llvm_full.lic
rename to tools/check-licenses/golden/patterns/exception/apache_llvm/apache_llvm_full.lic
diff --git a/tools/check-licenses/golden/patterns/llvm_project2.lic b/tools/check-licenses/golden/patterns/exception/apache_llvm/llvm_project.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/llvm_project2.lic
rename to tools/check-licenses/golden/patterns/exception/apache_llvm/llvm_project.lic
diff --git a/tools/check-licenses/golden/patterns/ffmpeg.lic b/tools/check-licenses/golden/patterns/exception/ffmpeg/ffmpeg.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/ffmpeg.lic
rename to tools/check-licenses/golden/patterns/exception/ffmpeg/ffmpeg.lic
diff --git a/tools/check-licenses/golden/patterns/rust_project.lic b/tools/check-licenses/golden/patterns/exception/rust/rust_project.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/rust_project.lic
rename to tools/check-licenses/golden/patterns/exception/rust/rust_project.lic
diff --git a/tools/check-licenses/golden/patterns/ofl.lic b/tools/check-licenses/golden/patterns/exception/sil/ofl.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/ofl.lic
rename to tools/check-licenses/golden/patterns/exception/sil/ofl.lic
diff --git a/tools/check-licenses/golden/patterns/sil_open_font.lic b/tools/check-licenses/golden/patterns/exception/sil/sil_open_font.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/sil_open_font.lic
rename to tools/check-licenses/golden/patterns/exception/sil/sil_open_font.lic
diff --git a/tools/check-licenses/golden/patterns/nordic_freertos.lic b/tools/check-licenses/golden/patterns/nordic_freertos.lic
deleted file mode 100644
index d747181..0000000
--- a/tools/check-licenses/golden/patterns/nordic_freertos.lic
+++ /dev/null
@@ -1,60 +0,0 @@
-(?i)FreeRTOS license
-================
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files \(the "Software"\), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and\/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-Nordic Semiconductor License
-=============================
-
-Copyright \(c\) [\d]{4} - [\d]{4}, Nordic Semiconductor ASA
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-
-2. Redistributions in binary form, except as embedded into a Nordic
-   Semiconductor ASA integrated circuit in a product or a software update for
-   such product, must reproduce the above copyright notice, this list of
-   conditions and the following disclaimer in the documentation and\/or other
-   materials provided with the distribution.
-
-3. Neither the name of Nordic Semiconductor ASA nor the names of its
-   contributors may be used to endorse or promote products derived from this
-   software without specific prior written permission.
-
-4. This software, with or without modification, must only be used with a
-   Nordic Semiconductor ASA integrated circuit.
-
-5. Any software provided in binary form under this license must not be reverse
-   engineered, decompiled, modified and\/or disassembled.
-
-THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS
-OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES \(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
-GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION\)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-LIABILITY, OR TORT \(INCLUDING NEGLIGENCE OR OTHERWISE\) ARISING IN ANY WAY OUT
-OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/tools/check-licenses/golden/patterns/apache.lic b/tools/check-licenses/golden/patterns/notice/apache/apache.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/apache.lic
rename to tools/check-licenses/golden/patterns/notice/apache/apache.lic
diff --git a/tools/check-licenses/golden/patterns/apache_full.lic b/tools/check-licenses/golden/patterns/notice/apache/apache_full.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/apache_full.lic
rename to tools/check-licenses/golden/patterns/notice/apache/apache_full.lic
diff --git a/tools/check-licenses/golden/patterns/mbedtls.lic b/tools/check-licenses/golden/patterns/notice/apache/mbedtls.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/mbedtls.lic
rename to tools/check-licenses/golden/patterns/notice/apache/mbedtls.lic
diff --git a/tools/check-licenses/golden/patterns/rsa.lic b/tools/check-licenses/golden/patterns/notice/apache/rsa.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/rsa.lic
rename to tools/check-licenses/golden/patterns/notice/apache/rsa.lic
diff --git a/tools/check-licenses/golden/patterns/rust.lic b/tools/check-licenses/golden/patterns/notice/apache/rust.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/rust.lic
rename to tools/check-licenses/golden/patterns/notice/apache/rust.lic
diff --git a/tools/check-licenses/golden/patterns/acpica.lic b/tools/check-licenses/golden/patterns/notice/bsd/acpica.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/acpica.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/acpica.lic
diff --git a/tools/check-licenses/golden/patterns/android.lic b/tools/check-licenses/golden/patterns/notice/bsd/android.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/android.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/android.lic
diff --git a/tools/check-licenses/golden/patterns/bsd.lic b/tools/check-licenses/golden/patterns/notice/bsd/bsd.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/bsd.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/bsd.lic
diff --git a/tools/check-licenses/golden/patterns/bsd2.lic b/tools/check-licenses/golden/patterns/notice/bsd/bsd2.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/bsd2.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/bsd2.lic
diff --git a/tools/check-licenses/golden/patterns/bsd3.lic b/tools/check-licenses/golden/patterns/notice/bsd/bsd3.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/bsd3.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/bsd3.lic
diff --git a/tools/check-licenses/golden/patterns/chromium.lic b/tools/check-licenses/golden/patterns/notice/bsd/chromium.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/chromium.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/chromium.lic
diff --git a/tools/check-licenses/golden/patterns/chromium2.lic b/tools/check-licenses/golden/patterns/notice/bsd/chromium2.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/chromium2.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/chromium2.lic
diff --git a/tools/check-licenses/golden/patterns/cmake_project.lic b/tools/check-licenses/golden/patterns/notice/bsd/cmake_project.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/cmake_project.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/cmake_project.lic
diff --git a/tools/check-licenses/golden/patterns/dart.lic b/tools/check-licenses/golden/patterns/notice/bsd/dart.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/dart.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/dart.lic
diff --git a/tools/check-licenses/golden/patterns/edk2.lic b/tools/check-licenses/golden/patterns/notice/bsd/edk2.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/edk2.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/edk2.lic
diff --git a/tools/check-licenses/golden/patterns/flutter.lic b/tools/check-licenses/golden/patterns/notice/bsd/flutter.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/flutter.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/flutter.lic
diff --git a/tools/check-licenses/golden/patterns/fuchsia2.lic b/tools/check-licenses/golden/patterns/notice/bsd/fuchsia.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/fuchsia2.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/fuchsia.lic
diff --git a/tools/check-licenses/golden/patterns/fuchsia3.lic b/tools/check-licenses/golden/patterns/notice/bsd/fuchsia2.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/fuchsia3.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/fuchsia2.lic
diff --git a/tools/check-licenses/golden/patterns/fuchsia_bsd.lic b/tools/check-licenses/golden/patterns/notice/bsd/fuchsia_bsd.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/fuchsia_bsd.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/fuchsia_bsd.lic
diff --git a/tools/check-licenses/golden/patterns/fuchsia_bsd2.lic b/tools/check-licenses/golden/patterns/notice/bsd/fuchsia_bsd2.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/fuchsia_bsd2.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/fuchsia_bsd2.lic
diff --git a/tools/check-licenses/golden/patterns/go_authors.lic b/tools/check-licenses/golden/patterns/notice/bsd/go_authors.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/go_authors.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/go_authors.lic
diff --git a/tools/check-licenses/golden/patterns/go_authors2.lic b/tools/check-licenses/golden/patterns/notice/bsd/go_authors2.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/go_authors2.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/go_authors2.lic
diff --git a/tools/check-licenses/golden/patterns/google.lic b/tools/check-licenses/golden/patterns/notice/bsd/google.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/google.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/google.lic
diff --git a/tools/check-licenses/golden/patterns/happy_bunny.lic b/tools/check-licenses/golden/patterns/notice/bsd/happy_bunny.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/happy_bunny.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/happy_bunny.lic
diff --git a/tools/check-licenses/golden/patterns/intel.lic b/tools/check-licenses/golden/patterns/notice/bsd/intel.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/intel.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/intel.lic
diff --git a/tools/check-licenses/golden/patterns/libavcodec.lic b/tools/check-licenses/golden/patterns/notice/bsd/libavcodec.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/libavcodec.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/libavcodec.lic
diff --git a/tools/check-licenses/golden/patterns/lss_full.lic b/tools/check-licenses/golden/patterns/notice/bsd/lss_full.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/lss_full.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/lss_full.lic
diff --git a/tools/check-licenses/golden/patterns/lz4.lic b/tools/check-licenses/golden/patterns/notice/bsd/lz4.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/lz4.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/lz4.lic
diff --git a/tools/check-licenses/golden/patterns/markupsafe.lic b/tools/check-licenses/golden/patterns/notice/bsd/markupsafe.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/markupsafe.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/markupsafe.lic
diff --git a/tools/check-licenses/golden/patterns/nacl.lic b/tools/check-licenses/golden/patterns/notice/bsd/nacl.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/nacl.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/nacl.lic
diff --git a/tools/check-licenses/golden/patterns/openssl_project.lic b/tools/check-licenses/golden/patterns/notice/bsd/openssl_project.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/openssl_project.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/openssl_project.lic
diff --git a/tools/check-licenses/golden/patterns/university_of_california.lic b/tools/check-licenses/golden/patterns/notice/bsd/university_of_california.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/university_of_california.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/university_of_california.lic
diff --git a/tools/check-licenses/golden/patterns/university_of_california2.lic b/tools/check-licenses/golden/patterns/notice/bsd/university_of_california2.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/university_of_california2.lic
rename to tools/check-licenses/golden/patterns/notice/bsd/university_of_california2.lic
diff --git a/tools/check-licenses/golden/patterns/freetype.lic b/tools/check-licenses/golden/patterns/notice/freetype/freetype.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/freetype.lic
rename to tools/check-licenses/golden/patterns/notice/freetype/freetype.lic
diff --git a/tools/check-licenses/golden/patterns/freetype2.lic b/tools/check-licenses/golden/patterns/notice/freetype/freetype2.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/freetype2.lic
rename to tools/check-licenses/golden/patterns/notice/freetype/freetype2.lic
diff --git a/tools/check-licenses/golden/patterns/boring_ssl.lic b/tools/check-licenses/golden/patterns/notice/isc/boring_ssl.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/boring_ssl.lic
rename to tools/check-licenses/golden/patterns/notice/isc/boring_ssl.lic
diff --git a/tools/check-licenses/golden/patterns/fuchsia.lic b/tools/check-licenses/golden/patterns/notice/isc/fuchsia.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/fuchsia.lic
rename to tools/check-licenses/golden/patterns/notice/isc/fuchsia.lic
diff --git a/tools/check-licenses/golden/patterns/icu.lic b/tools/check-licenses/golden/patterns/notice/isc/icu.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/icu.lic
rename to tools/check-licenses/golden/patterns/notice/isc/icu.lic
diff --git a/tools/check-licenses/golden/patterns/isc.lic b/tools/check-licenses/golden/patterns/notice/isc/isc.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/isc.lic
rename to tools/check-licenses/golden/patterns/notice/isc/isc.lic
diff --git a/tools/check-licenses/golden/patterns/digital_equipment.lic b/tools/check-licenses/golden/patterns/notice/mit/digital_equipment.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/digital_equipment.lic
rename to tools/check-licenses/golden/patterns/notice/mit/digital_equipment.lic
diff --git a/tools/check-licenses/golden/patterns/fuchsia4.lic b/tools/check-licenses/golden/patterns/notice/mit/fuchsia.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/fuchsia4.lic
rename to tools/check-licenses/golden/patterns/notice/mit/fuchsia.lic
diff --git a/tools/check-licenses/golden/patterns/fuchsia_mit.lic b/tools/check-licenses/golden/patterns/notice/mit/fuchsia_mit.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/fuchsia_mit.lic
rename to tools/check-licenses/golden/patterns/notice/mit/fuchsia_mit.lic
diff --git a/tools/check-licenses/golden/patterns/icu2.lic b/tools/check-licenses/golden/patterns/notice/mit/icu.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/icu2.lic
rename to tools/check-licenses/golden/patterns/notice/mit/icu.lic
diff --git a/tools/check-licenses/golden/patterns/imgtec-pvr-rgx-km.lic b/tools/check-licenses/golden/patterns/notice/mit/imgtec-pvr-rgx-km.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/imgtec-pvr-rgx-km.lic
rename to tools/check-licenses/golden/patterns/notice/mit/imgtec-pvr-rgx-km.lic
diff --git a/tools/check-licenses/golden/patterns/jq_project.lic b/tools/check-licenses/golden/patterns/notice/mit/jq_project.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/jq_project.lic
rename to tools/check-licenses/golden/patterns/notice/mit/jq_project.lic
diff --git a/tools/check-licenses/golden/patterns/mesa_full.lic b/tools/check-licenses/golden/patterns/notice/mit/mesa_full.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/mesa_full.lic
rename to tools/check-licenses/golden/patterns/notice/mit/mesa_full.lic
diff --git a/tools/check-licenses/golden/patterns/microsoft.lic b/tools/check-licenses/golden/patterns/notice/mit/microsoft.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/microsoft.lic
rename to tools/check-licenses/golden/patterns/notice/mit/microsoft.lic
diff --git a/tools/check-licenses/golden/patterns/mit.lic b/tools/check-licenses/golden/patterns/notice/mit/mit.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/mit.lic
rename to tools/check-licenses/golden/patterns/notice/mit/mit.lic
diff --git a/tools/check-licenses/golden/patterns/mit2.lic b/tools/check-licenses/golden/patterns/notice/mit/mit2.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/mit2.lic
rename to tools/check-licenses/golden/patterns/notice/mit/mit2.lic
diff --git a/tools/check-licenses/golden/patterns/mit3.lic b/tools/check-licenses/golden/patterns/notice/mit/mit3.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/mit3.lic
rename to tools/check-licenses/golden/patterns/notice/mit/mit3.lic
diff --git a/tools/check-licenses/golden/patterns/mit4.lic b/tools/check-licenses/golden/patterns/notice/mit/mit4.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/mit4.lic
rename to tools/check-licenses/golden/patterns/notice/mit/mit4.lic
diff --git a/tools/check-licenses/golden/patterns/musl_full.lic b/tools/check-licenses/golden/patterns/notice/mit/musl_full.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/musl_full.lic
rename to tools/check-licenses/golden/patterns/notice/mit/musl_full.lic
diff --git a/tools/check-licenses/golden/patterns/ncd.lic b/tools/check-licenses/golden/patterns/notice/mit/ncd.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/ncd.lic
rename to tools/check-licenses/golden/patterns/notice/mit/ncd.lic
diff --git a/tools/check-licenses/golden/patterns/notice/mit/oracle.lic b/tools/check-licenses/golden/patterns/notice/mit/oracle.lic
new file mode 100644
index 0000000..43af751
--- /dev/null
+++ b/tools/check-licenses/golden/patterns/notice/mit/oracle.lic
@@ -0,0 +1,25 @@
+Copyright \(c\) [\d]{4}, Oracle America, Inc.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following
+disclaimer in the documentation and\/or other materials
+provided with the distribution.
+Neither the name of the \"Oracle America, Inc.\" nor the names of its
+contributors may be used to endorse or promote products derived
+from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES \(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION\) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \(INCLUDING
+NEGLIGENCE OR OTHERWISE\) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/tools/check-licenses/golden/patterns/packard.lic b/tools/check-licenses/golden/patterns/notice/mit/packard.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/packard.lic
rename to tools/check-licenses/golden/patterns/notice/mit/packard.lic
diff --git a/tools/check-licenses/golden/patterns/rarick.lic b/tools/check-licenses/golden/patterns/notice/mit/rarick.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/rarick.lic
rename to tools/check-licenses/golden/patterns/notice/mit/rarick.lic
diff --git a/tools/check-licenses/golden/patterns/sgi.lic b/tools/check-licenses/golden/patterns/notice/mit/sgi.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/sgi.lic
rename to tools/check-licenses/golden/patterns/notice/mit/sgi.lic
diff --git a/tools/check-licenses/golden/patterns/unicode_license_full.lic b/tools/check-licenses/golden/patterns/notice/mit/unicode_license_full.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/unicode_license_full.lic
rename to tools/check-licenses/golden/patterns/notice/mit/unicode_license_full.lic
diff --git a/tools/check-licenses/golden/patterns/vulkan.lic b/tools/check-licenses/golden/patterns/notice/mit/vulkan.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/vulkan.lic
rename to tools/check-licenses/golden/patterns/notice/mit/vulkan.lic
diff --git a/tools/check-licenses/golden/patterns/vulkan2.lic b/tools/check-licenses/golden/patterns/notice/mit/vulkan2.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/vulkan2.lic
rename to tools/check-licenses/golden/patterns/notice/mit/vulkan2.lic
diff --git a/tools/check-licenses/golden/patterns/xfree86.lic b/tools/check-licenses/golden/patterns/notice/mit/xfree86.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/xfree86.lic
rename to tools/check-licenses/golden/patterns/notice/mit/xfree86.lic
diff --git a/tools/check-licenses/golden/patterns/yoran_heling.lic b/tools/check-licenses/golden/patterns/notice/mit/yoran_heling.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/yoran_heling.lic
rename to tools/check-licenses/golden/patterns/notice/mit/yoran_heling.lic
diff --git a/tools/check-licenses/golden/patterns/llvm_project.lic b/tools/check-licenses/golden/patterns/notice/ncsa/llvm_project.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/llvm_project.lic
rename to tools/check-licenses/golden/patterns/notice/ncsa/llvm_project.lic
diff --git a/tools/check-licenses/golden/patterns/university_of_illinois.lic b/tools/check-licenses/golden/patterns/notice/ncsa/university_of_illinois.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/university_of_illinois.lic
rename to tools/check-licenses/golden/patterns/notice/ncsa/university_of_illinois.lic
diff --git a/tools/check-licenses/golden/patterns/aac_full.lic b/tools/check-licenses/golden/patterns/notice/other/aac_full.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/aac_full.lic
rename to tools/check-licenses/golden/patterns/notice/other/aac_full.lic
diff --git a/tools/check-licenses/golden/patterns/flite.lic b/tools/check-licenses/golden/patterns/notice/other/flite.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/flite.lic
rename to tools/check-licenses/golden/patterns/notice/other/flite.lic
diff --git a/tools/check-licenses/golden/patterns/icu3.lic b/tools/check-licenses/golden/patterns/notice/other/icu.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/icu3.lic
rename to tools/check-licenses/golden/patterns/notice/other/icu.lic
diff --git a/tools/check-licenses/golden/patterns/original_sslea_license.lic b/tools/check-licenses/golden/patterns/notice/other/original_sslea_license.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/original_sslea_license.lic
rename to tools/check-licenses/golden/patterns/notice/other/original_sslea_license.lic
diff --git a/tools/check-licenses/golden/patterns/pthreads.lic b/tools/check-licenses/golden/patterns/notice/other/pthreads.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/pthreads.lic
rename to tools/check-licenses/golden/patterns/notice/other/pthreads.lic
diff --git a/tools/check-licenses/golden/patterns/readme_ijg.lic b/tools/check-licenses/golden/patterns/notice/other/readme_ijg.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/readme_ijg.lic
rename to tools/check-licenses/golden/patterns/notice/other/readme_ijg.lic
diff --git a/tools/check-licenses/golden/patterns/python_project.lic b/tools/check-licenses/golden/patterns/notice/python/python_project.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/python_project.lic
rename to tools/check-licenses/golden/patterns/notice/python/python_project.lic
diff --git a/tools/check-licenses/golden/patterns/libpng.lic b/tools/check-licenses/golden/patterns/notice/zlib/libpng.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/libpng.lic
rename to tools/check-licenses/golden/patterns/notice/zlib/libpng.lic
diff --git a/tools/check-licenses/golden/patterns/valgrind.lic b/tools/check-licenses/golden/patterns/notice/zlib/valgrind.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/valgrind.lic
rename to tools/check-licenses/golden/patterns/notice/zlib/valgrind.lic
diff --git a/tools/check-licenses/golden/patterns/zlib.lic b/tools/check-licenses/golden/patterns/notice/zlib/zlib.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/zlib.lic
rename to tools/check-licenses/golden/patterns/notice/zlib/zlib.lic
diff --git a/tools/check-licenses/golden/patterns/zlib_full.lic b/tools/check-licenses/golden/patterns/notice/zlib/zlib_full.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/zlib_full.lic
rename to tools/check-licenses/golden/patterns/notice/zlib/zlib_full.lic
diff --git a/tools/check-licenses/golden/patterns/oracle.lic b/tools/check-licenses/golden/patterns/oracle.lic
deleted file mode 100644
index c30d074..0000000
--- a/tools/check-licenses/golden/patterns/oracle.lic
+++ /dev/null
@@ -1,25 +0,0 @@
-[\s*\/#]* Copyright \(c\) [\d]{4}, Oracle America, Inc.
-[\s*\/]*Redistribution and use in source and binary forms, with or without
-[\s*\/]*modification, are permitted provided that the following conditions are
-[\s*\/]*met:
-[\s*\/]*Redistributions of source code must retain the above copyright
-[\s*\/]* notice, this list of conditions and the following disclaimer.
-[\s*\/]* Redistributions in binary form must reproduce the above
-[\s*\/]*copyright notice, this list of conditions and the following
-[\s*\/]*disclaimer in the documentation and\/or other materials
-[\s*\/]*provided with the distribution.
-[\s*\/]*Neither the name of the \"Oracle America, Inc.\" nor the names of its
-[\s*\/]*contributors may be used to endorse or promote products derived
-[\s*\/]*from this software without specific prior written permission.
-[\s*\/]*THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-[\s*\/]*\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-[\s*\/]*LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-[\s*\/]*FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-[\s*\/]*COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
-[\s*\/]*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-[\s*\/]*DAMAGES \(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
-[\s*\/]*GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-[\s*\/]*INTERRUPTION\) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-[\s*\/]*WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT \(INCLUDING
-[\s*\/]*NEGLIGENCE OR OTHERWISE\) ARISING IN ANY WAY OUT OF THE USE
-[\s*\/]*OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/tools/check-licenses/golden/patterns/arm.lic b/tools/check-licenses/golden/patterns/restricted/gpl/arm.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/arm.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/arm.lic
diff --git a/tools/check-licenses/golden/patterns/ffmpeg_project.lic b/tools/check-licenses/golden/patterns/restricted/gpl/ffmpeg_project.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/ffmpeg_project.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/ffmpeg_project.lic
diff --git a/tools/check-licenses/golden/patterns/gcc.lic b/tools/check-licenses/golden/patterns/restricted/gpl/gcc.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/gcc.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/gcc.lic
diff --git a/tools/check-licenses/golden/patterns/gcc2.lic b/tools/check-licenses/golden/patterns/restricted/gpl/gcc2.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/gcc2.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/gcc2.lic
diff --git a/tools/check-licenses/golden/patterns/gcc3.lic b/tools/check-licenses/golden/patterns/restricted/gpl/gcc3.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/gcc3.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/gcc3.lic
diff --git a/tools/check-licenses/golden/patterns/gnu.lic b/tools/check-licenses/golden/patterns/restricted/gpl/gnu.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/gnu.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/gnu.lic
diff --git a/tools/check-licenses/golden/patterns/gnu2.lic b/tools/check-licenses/golden/patterns/restricted/gpl/gnu2.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/gnu2.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/gnu2.lic
diff --git a/tools/check-licenses/golden/patterns/gnu3.lic b/tools/check-licenses/golden/patterns/restricted/gpl/gnu3.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/gnu3.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/gnu3.lic
diff --git a/tools/check-licenses/golden/patterns/gnu4.lic b/tools/check-licenses/golden/patterns/restricted/gpl/gnu4.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/gnu4.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/gnu4.lic
diff --git a/tools/check-licenses/golden/patterns/gnu5.lic b/tools/check-licenses/golden/patterns/restricted/gpl/gnu5.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/gnu5.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/gnu5.lic
diff --git a/tools/check-licenses/golden/patterns/gnu6.lic b/tools/check-licenses/golden/patterns/restricted/gpl/gnu6.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/gnu6.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/gnu6.lic
diff --git a/tools/check-licenses/golden/patterns/gpl.lic b/tools/check-licenses/golden/patterns/restricted/gpl/gpl.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/gpl.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/gpl.lic
diff --git a/tools/check-licenses/golden/patterns/lgpl_full.lic b/tools/check-licenses/golden/patterns/restricted/gpl/lgpl_full.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/lgpl_full.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/lgpl_full.lic
diff --git a/tools/check-licenses/golden/patterns/linux_gnu.lic b/tools/check-licenses/golden/patterns/restricted/gpl/linux_gnu.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/linux_gnu.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/linux_gnu.lic
diff --git a/tools/check-licenses/golden/patterns/redhat.lic b/tools/check-licenses/golden/patterns/restricted/gpl/redhat.lic
similarity index 100%
rename from tools/check-licenses/golden/patterns/redhat.lic
rename to tools/check-licenses/golden/patterns/restricted/gpl/redhat.lic
diff --git a/tools/check-licenses/licenses.go b/tools/check-licenses/licenses.go
index d98dd09..95f96b8 100644
--- a/tools/check-licenses/licenses.go
+++ b/tools/check-licenses/licenses.go
@@ -26,19 +26,22 @@
 // the .lic folder location specified in Config
 func NewLicenses(ctx context.Context, root string, prohibitedLicenseTypes []string) (*Licenses, error) {
 	defer trace.StartRegion(ctx, "NewLicenses").End()
-	f, err := os.Open(root)
-	if err != nil {
-		return nil, err
-	}
-	names, err := f.Readdirnames(0)
-	f.Close()
+	licensesPath := []string{}
+	err := filepath.Walk(root,
+		func(path string, info os.FileInfo, err error) error {
+			if err != nil {
+				return err
+			}
+			if info.IsDir() {
+				return nil
+			}
+			licensesPath = append(licensesPath, path)
+			return nil
+		})
 	if err != nil {
 		return nil, err
 	}
 
 	l := &Licenses{}
-	for _, n := range names {
-		bytes, err := ioutil.ReadFile(filepath.Join(root, n))
+	for _, path := range licensesPath {
+		bytes, err := ioutil.ReadFile(path)
 		if err != nil {
 			return nil, err
 		}
@@ -52,14 +55,15 @@
 
 		re, err := regexp.Compile(regex)
 		if err != nil {
-			return nil, fmt.Errorf("%s: %w", n, err)
+			return nil, fmt.Errorf("%s: %w", path, err)
 		}
+		base := filepath.Base(path)
 		l.licenses = append(
 			l.licenses,
 			&License{
 				pattern:      re,
-				Category:     n,
-				ValidType:    contains(prohibitedLicenseTypes, n),
+				Category:     base,
+				ValidType:    contains(prohibitedLicenseTypes, base),
 				matches:      map[string]*Match{},
 				matchChannel: make(chan *Match, 10),
 			})
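
The licenses.go change above swaps the flat Readdirnames listing for filepath.Walk so that pattern files in the new nested category directories (notice/zlib/, restricted/gpl/, ...) are still discovered. A minimal standalone Go sketch of the same traversal; the collectPatterns name and the golden/patterns path are illustrative only, not part of the tool:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// collectPatterns walks root recursively and returns every regular file it
// finds, mirroring how NewLicenses now gathers .lic patterns from nested folders.
func collectPatterns(root string) ([]string, error) {
	var paths []string
	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			// Propagate I/O errors instead of touching a possibly-nil info.
			return err
		}
		if info.IsDir() {
			// Only files carry license patterns; directories are just categories.
			return nil
		}
		paths = append(paths, path)
		return nil
	})
	return paths, err
}

func main() {
	paths, err := collectPatterns("golden/patterns")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, p := range paths {
		fmt.Println(filepath.Base(p), "->", p)
	}
}
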
diff --git a/tools/check-licenses/licenses_test.go b/tools/check-licenses/licenses_test.go
index 9930bdf..2dbd3c6 100644
--- a/tools/check-licenses/licenses_test.go
+++ b/tools/check-licenses/licenses_test.go
@@ -12,15 +12,19 @@
 )
 
 func TestLicensesMatchSingleLicenseFile(t *testing.T) {
+	configPath := filepath.Join(*testDataDir, "filetree", "simple.json")
+	config, err := NewConfig(configPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	folder := mkLicenseDir(t)
 	l, err := NewLicenses(context.Background(), folder, []string{"gcc"})
 	if err != nil {
 		t.Fatalf("NewLicenses(...): %s", err)
 	}
-	metrics := &Metrics{}
-	metrics.Init()
-	ft := &FileTree{}
-	ft.Init()
+	metrics := NewMetrics()
+	ft := NewFileTree(context.Background(), folder, nil, config, metrics)
 	data := []byte("This is very Apache licensed\nCopyright Foo\n")
 	l.MatchSingleLicenseFile(data, "foo.rs", metrics, ft)
 	data = []byte("BSD much.\nCopyright Bar Inc\n")
@@ -36,8 +40,7 @@
 	if err != nil {
 		t.Fatalf("NewLicenses(...): %s", err)
 	}
-	metrics := &Metrics{}
-	metrics.Init()
+	metrics := NewMetrics()
 	data := []byte("This is very Apache licensed\nCopyright Foo\n")
 	ok, _ := l.MatchFile(data, "foo.rs", metrics)
 	if !ok {
diff --git a/tools/check-licenses/metrics.go b/tools/check-licenses/metrics.go
index 4f1262e..a2d6a5d 100644
--- a/tools/check-licenses/metrics.go
+++ b/tools/check-licenses/metrics.go
@@ -17,22 +17,26 @@
 	sync.RWMutex
 }
 
-func (metrics *Metrics) Init() {
-	metrics.values = make(map[string]uint)
-	metrics.order = []string{
-		"num_extensions_excluded",
-		"num_licensed",
-		"num_non_single_license_files",
-		"num_one_file_matched_to_multiple_single_licenses",
-		"num_one_file_matched_to_one_single_license",
-		"num_single_license_file_match",
-		"num_single_license_files",
-		"num_unlicensed",
-		"num_with_project_license",
+func NewMetrics() *Metrics {
+	m := &Metrics{
+		values: make(map[string]uint),
+		order: []string{
+			"num_extensions_excluded",
+			"num_licensed",
+			"num_non_single_license_files",
+			"num_one_file_matched_to_multiple_single_licenses",
+			"num_one_file_matched_to_one_single_license",
+			"num_single_license_file_match",
+			"num_single_license_files",
+			"num_unlicensed",
+			"num_with_project_license",
+		},
 	}
-	for _, key := range metrics.order {
-		metrics.values[key] = 0
+
+	for _, key := range m.order {
+		m.values[key] = 0
 	}
+	return m
 }
 
 func (metrics *Metrics) increment(key string) {
diff --git a/tools/check-licenses/metrics_test.go b/tools/check-licenses/metrics_test.go
index 5b7e4e7..6431e47 100644
--- a/tools/check-licenses/metrics_test.go
+++ b/tools/check-licenses/metrics_test.go
@@ -9,8 +9,7 @@
 )
 
 func TestMetricsInit(t *testing.T) {
-	var metrics Metrics
-	metrics.Init()
+	metrics := NewMetrics()
 	num_values := len(metrics.values)
 	num_order := len(metrics.order)
 	want := 0
diff --git a/tools/check-licenses/template_test.go b/tools/check-licenses/template_test.go
index b8f6b00..daa23d6 100644
--- a/tools/check-licenses/template_test.go
+++ b/tools/check-licenses/template_test.go
@@ -16,8 +16,8 @@
 	if err := ioutil.WriteFile(path, []byte(json), 0o600); err != nil {
 		t.Errorf("%v(): got %v", t.Name(), err)
 	}
-	var config Config
-	if err := config.Init(path); err != nil {
+	config, err := NewConfig(path)
+	if err != nil {
 		t.Errorf("%v(): got %v", t.Name(), err)
 	}
 	// TODO(omerlevran): Add test.
diff --git a/tools/check-licenses/testdata/filetree/empty.json b/tools/check-licenses/testdata/filetree/empty.json
new file mode 100644
index 0000000..a7cf1c8
--- /dev/null
+++ b/tools/check-licenses/testdata/filetree/empty.json
@@ -0,0 +1,33 @@
+{
+   "filesRegex":[
+   ],
+   "skipFiles":[
+      ".gitignore",
+      ".empty"
+   ],
+   "skipDirs":[
+      ".git"
+   ],
+   "textExtensionList":[
+      "go",
+      "py"
+   ],
+   "maxReadSize":6144,
+   "separatorWidth":80,
+   "outputFilePrefix":"NOTICE",
+   "outputFileExtension":"txt",
+   "product":"astro",
+   "singleLicenseFiles":[
+      "LICENSE"
+   ],
+   "licensePatternDir":"golden/",
+   "baseDir":".",
+   "target":"all",
+   "logLevel":"verbose",
+   "customProjectLicenses":[
+      {
+         "projectRoot":"test",
+         "licenseLocation":"test"
+      }
+   ]
+}
diff --git a/tools/check-licenses/testdata/filetree/empty/.empty b/tools/check-licenses/testdata/filetree/empty/.empty
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/check-licenses/testdata/filetree/empty/.empty
diff --git a/tools/check-licenses/traverse.go b/tools/check-licenses/traverse.go
index 168d08b..e3fea5d 100644
--- a/tools/check-licenses/traverse.go
+++ b/tools/check-licenses/traverse.go
@@ -30,8 +30,7 @@
 func Walk(ctx context.Context, config *Config) error {
 	var eg errgroup.Group
 	var wg sync.WaitGroup
-	metrics := new(Metrics)
-	metrics.Init()
+	metrics := NewMetrics()
 	file_tree := NewFileTree(ctx, config.BaseDir, nil, config, metrics)
 	licenses, err := NewLicenses(ctx, config.LicensePatternDir, config.ProhibitedLicenseTypes)
 	if err != nil {
diff --git a/tools/devshell/contrib/triage b/tools/devshell/contrib/triage
index a232949..47ec6d3 100755
--- a/tools/devshell/contrib/triage
+++ b/tools/devshell/contrib/triage
@@ -144,7 +144,8 @@
 if [ -z "${select_filters}" ]; then
 
   if (( !"${#config_paths[@]}" )); then
-    config_paths=( "${FUCHSIA_DIR}/src/diagnostics/config/triage/" )
+    config_paths=( "${FUCHSIA_DIR}/src/diagnostics/config/triage/"
+                   "${FUCHSIA_DIR}/src/diagnostics/config/triage/detect/" )
   fi
 
   config_files=( )
diff --git a/tools/devshell/shell b/tools/devshell/shell
index fa1d63f..705fc5b 100755
--- a/tools/devshell/shell
+++ b/tools/devshell/shell
@@ -105,7 +105,7 @@
 fi
 device_addr="$(get-fuchsia-device-addr)" || exit $?
 if [[ -z "${device_addr}" ]]; then
-  # Error output is provided to stderr by get-fuchsia-device-addr
+  fx-error "Cannot find a device."
   exit 1
 fi
 args+=( "${device_addr}" )
@@ -116,13 +116,4 @@
 # failures much harder to diagnose when helping people. The control master will
 # mean you only get one per TCP socket, which is once per newly booted host.
 # It's not a huge burden compared to end user support.
-fx-command-run ssh "${args[@]}"
-status=$?
-
-# FX_CALLER is not empty if this script was executed via fx-command-run/exec
-# and we only want to show this hint message when the user executes 'fx shell'
-# directly.
-if [[ -z "${FX_CALLER}" && $status -eq 255 ]]; then
-  echo "Hint: use 'fx shell --check' to diagnose SSH configuration issues."
-fi
-exit $status
+fx-command-exec ssh "${args[@]}"
diff --git a/tools/devshell/tests/subcommands/fx_test_test b/tools/devshell/tests/subcommands/fx_test_test
index ce94830..b463688 100644
--- a/tools/devshell/tests/subcommands/fx_test_test
+++ b/tools/devshell/tests/subcommands/fx_test_test
@@ -56,7 +56,7 @@
   cp "${DATA_DIR}/tests_package_server_integration.json" "${BT_TEMP_DIR}/out/default/tests.json"
   local out="${BT_TEMP_DIR}/_fx_test_output"
   local testname="overflow_fuzzer_test"
-  BT_EXPECT ${fx} test ${testname} > ${out}
+  BT_EXPECT ${fx} test --no-use-package-hash ${testname} > ${out}
   # ensure that is-package-server-running was called
   BT_ASSERT_FILE_EXISTS "${BT_TEMP_DIR}/tools/devshell/is-package-server-running.mock_state"
   # ensure that update-if-in-base was called with the proper testname
@@ -68,7 +68,7 @@
   cp "${DATA_DIR}/tests_package_server_integration.json" "${BT_TEMP_DIR}/out/default/tests.json"
   local out="${BT_TEMP_DIR}/_fx_test_output"
   local testname="overflow_fuzzer_test"
-  BT_EXPECT ${fx} test ${testname} > ${out}
+  BT_EXPECT ${fx} test --no-use-package-hash ${testname} > ${out}
   # ensure that fx build was called
   # TODO: once fx test calls fx build with a specific target, check it here as well
   BT_ASSERT_FILE_EXISTS "${BT_TEMP_DIR}/tools/devshell/build.mock_state"
@@ -80,7 +80,7 @@
   local out="${BT_TEMP_DIR}/_fx_test_output"
   local testname="overflow_fuzzer_test"
   echo 1 > "${BT_TEMP_DIR}/tools/devshell/build.mock_status"
-  BT_EXPECT_FAIL ${fx} test ${testname} > ${out}
+  BT_EXPECT_FAIL ${fx} test --no-use-package-hash ${testname} > ${out}
   # ensure that fx build was called
   # TODO: once fx test calls fx build with a specific target, check it here as well
   BT_ASSERT_FILE_EXISTS "${BT_TEMP_DIR}/tools/devshell/build.mock_state"
@@ -91,7 +91,7 @@
   cp "${DATA_DIR}/tests_package_server_integration.json" "${BT_TEMP_DIR}/out/default/tests.json"
   local out="${BT_TEMP_DIR}/_fx_test_output"
   local testname="overflow_fuzzer_test"
-  BT_EXPECT ${fx} test --no-build ${testname} > ${out}
+  BT_EXPECT ${fx} test --no-use-package-hash --no-build ${testname} > ${out}
   # ensure that fx build was called
   # TODO: once fx test calls fx build with a specific target, check it here as well
   BT_ASSERT_FILE_DOES_NOT_EXIST "${BT_TEMP_DIR}/tools/devshell/build.mock_state"
@@ -104,7 +104,7 @@
   local out="${BT_TEMP_DIR}/_fx_test_output"
   local testname1="overflow_fuzzer_test"
   echo 1 > "${BT_TEMP_DIR}/tools/devshell/is-package-server-running.mock_status"
-  BT_EXPECT_FAIL ${fx} test --no-build ${testname} > ${out}
+  BT_EXPECT_FAIL ${fx} test --no-use-package-hash --no-build ${testname} > ${out}
   # ensure that fx is-package-server-running was called
   BT_ASSERT_FILE_EXISTS "${BT_TEMP_DIR}/tools/devshell/is-package-server-running.mock_state"
 }
@@ -152,10 +152,10 @@
   btf::expect-mock-args "${BT_TEMP_DIR}/tools/devshell/shell" "run-test-component" "${packageUrl}"
 }
 
-# Test that "fx test" doesn't fail if a component test doesn't have a hashfile
-# property
+# Test that "fx test" fails if a component test doesn't have an entry in the
+# package repository
 TEST_fxtest_no_hashfile() {
-  mkdir -p "${BT_TEMP_DIR}/out/default"
+  cp -R "${DATA_DIR}/tests_hashfile/out" "${BT_TEMP_DIR}"
   cat > "${BT_TEMP_DIR}/out/default/tests.json" <<EOF
   [{"environments": [],
     "test": {
@@ -163,27 +163,27 @@
       "label": "//examples/fuzzer:fuzzing-examples_pkg(//build/toolchain/fuchsia:arm64)",
       "name": "overflow_fuzzer_test",
       "os": "fuchsia",
-      "package_url": "fuchsia-pkg://fuchsia.com/example-fuzzers#meta/overflow_fuzzer_test.cmx",
+      "package_url": "fuchsia-pkg://fuchsia.com/example_not_in_repository#meta/overflow_fuzzer_test.cmx",
       "path": ""
     }
   }]
 EOF
   local out="${BT_TEMP_DIR}/_fx_test_output"
 
-  # expect that "fx shell run-test-component URL-WITHOUT-HASH" is executed
-  BT_EXPECT ${fx} test --no-build overflow_fuzzer_test >> "${out}"
-  btf::expect-mock-args "${BT_TEMP_DIR}/tools/devshell/shell" \
-    "run-test-component" "fuchsia-pkg://fuchsia.com/example-fuzzers#meta/overflow_fuzzer_test.cmx"
+  # expect that fx test fails because the repository doesn't have an entry for
+  # this test's package (example_not_in_repository)
+  BT_EXPECT_FAIL ${fx} test --no-build overflow_fuzzer_test >> "${out}"
 }
 
-# Test that "fx test" builds only the "updates" target for device tests
-TEST_fxtest_build_device_only_updates() {
+# Test that "fx test" builds only the minimal target for device tests
+TEST_fxtest_build_device_only_package() {
   mkdir -p "${BT_TEMP_DIR}/out/default"
   cat > "${BT_TEMP_DIR}/out/default/tests.json" <<EOF
   [{"environments": [],
     "test": {
       "cpu": "arm64",
       "label": "//examples/fuzzer:fuzzing-examples_pkg(//build/toolchain/fuchsia:arm64)",
+      "package_label": "//examples/fuzzer:fuzzing_pkg(//build/toolchain/fuchsia:arm64)",
       "name": "overflow_fuzzer_test",
       "os": "fuchsia",
       "package_url": "fuchsia-pkg://fuchsia.com/example-fuzzers#meta/overflow_fuzzer_test.cmx",
@@ -193,9 +193,12 @@
 EOF
   local out="${BT_TEMP_DIR}/_fx_test_output"
 
-  # expect "fx shell build updates"
-  BT_EXPECT ${fx} test overflow_fuzzer_test >> "${out}"
-  btf::expect-mock-args "${BT_TEMP_DIR}/tools/devshell/build" "updates"
+  # with incremental enabled, expect "fx shell build <test_package>"
+  BT_EXPECT ${fx} --enable=incremental test --no-use-package-hash overflow_fuzzer_test >> "${out}"
+  btf::expect-mock-args "${BT_TEMP_DIR}/tools/devshell/build" "examples/fuzzer:fuzzing_pkg"
+  # with incremental disabled, expect "fx shell build updates"
+  BT_EXPECT ${fx} --disable=incremental test --no-use-package-hash overflow_fuzzer_test >> "${out}"
+  btf::expect-mock-args "${BT_TEMP_DIR}/tools/devshell/build.mock_state.2" "updates"
 }
 
 # Test that "fx test" builds the default target for an e2e test
@@ -241,7 +244,7 @@
 
   # expect "fx shell build"
   BT_EXPECT ${fx} test example_host_test >> "${out}"
-  btf::expect-mock-args "${BT_TEMP_DIR}/tools/devshell/build" "host_x64/example_host_test"
+  btf::expect-mock-args "${BT_TEMP_DIR}/tools/devshell/build" "host_x64/example_host_test" "--no-zircon"
 }
 
 # Test that "fx test" executes an e2e test with the proper env variables set
diff --git a/tools/devshell/vdl b/tools/devshell/vdl
index afeb146..f86cd84 100755
--- a/tools/devshell/vdl
+++ b/tools/devshell/vdl
@@ -18,5 +18,37 @@
 export FUCHSIA_BUILD_DIR
 export FUCHSIA_ZBI_COMPRESSION
 
-FVDL="${HOST_OUT_DIR}/fvdl"
-"${FVDL}" "$@"
+function main {
+  local build=false
+  if is_feature_enabled "incremental"; then
+    # In incremental workflows, build/refresh images is enabled by default
+    build=true
+  fi
+  args=()
+  while (( $# )); do
+    case "$1" in
+      --no-build)
+        build=false
+        ;;
+      --build)
+        build=true
+        ;;
+      --help|kill)
+        # these options don't require building images
+        build=false
+        args+=( "$1" )
+        ;;
+      *)
+        args+=( "$1" )
+    esac
+    shift
+  done
+
+  if $build; then
+    fx-info "Building/refreshing target 'images'"
+    fx-command-run build images
+  fi
+  fx-command-exec host-tool fvdl "${args[@]}"
+}
+
+main "$@"
\ No newline at end of file
diff --git a/tools/fidlcat/lib/interception_workflow.cc b/tools/fidlcat/lib/interception_workflow.cc
index 5184956..866b354 100644
--- a/tools/fidlcat/lib/interception_workflow.cc
+++ b/tools/fidlcat/lib/interception_workflow.cc
@@ -145,6 +145,11 @@
   workflow_->ProcessDetached(process->GetKoid());
 }
 
+void InterceptingProcessObserver::OnSymbolLoadFailure(zxdb::Process* process,
+                                                      const zxdb::Err& err) {
+  FX_LOGS(ERROR) << " cannot load symbols for process " << process->GetKoid() << ": " << err.msg();
+}
+
 InterceptionWorkflow::InterceptionWorkflow()
     : session_(new zxdb::Session()),
       delete_session_(true),
diff --git a/tools/fidlcat/lib/interception_workflow.h b/tools/fidlcat/lib/interception_workflow.h
index 9a67429..9b6dc8f 100644
--- a/tools/fidlcat/lib/interception_workflow.h
+++ b/tools/fidlcat/lib/interception_workflow.h
@@ -60,6 +60,7 @@
 
   void DidCreateProcess(zxdb::Process* process, bool autoattached) override;
   void WillDestroyProcess(zxdb::Process* process, DestroyReason reason, int exit_code) override;
+  void OnSymbolLoadFailure(zxdb::Process* process, const zxdb::Err& err) override;
 
  private:
   InterceptionWorkflow* workflow_;
diff --git a/tools/qemu/qemu.go b/tools/qemu/qemu.go
index 089dcb5..18e9c90 100644
--- a/tools/qemu/qemu.go
+++ b/tools/qemu/qemu.go
@@ -78,6 +78,16 @@
 	Forwards []Forward
 }
 
+// HCI identifies a Host Controller Interface.
+type HCI string
+
+// Host Controller Interface constants.
+const (
+	// XHCI is the Extensible Host Controller Interface.
+	// https://en.wikipedia.org/wiki/Extensible_Host_Controller_Interface
+	XHCI = "xhci"
+)
+
 // NetdevTap defines a netdev backend giving a tap interface.
 type NetdevTap struct {
 	// Name is the name of the interface.
@@ -170,6 +180,20 @@
 	q.SetFlag("-device", device)
 }
 
+// AddUSBDrive adds a USB storage device backed by the given raw-format drive.
+func (q *QEMUCommandBuilder) AddUSBDrive(d Drive) {
+	q.SetFlag("-drive", fmt.Sprintf("if=none,id=%s,file=%s,format=raw", d.ID, d.File))
+	q.SetFlag("-device", fmt.Sprintf("usb-storage,drive=%s", d.ID))
+}
+
+// AddHCI adds a host controller interface.
+func (q *QEMUCommandBuilder) AddHCI(hci HCI) error {
+	if hci != XHCI {
+		return fmt.Errorf("unimplemented host controller interface: %q", hci)
+	}
+	q.SetFlag("-device", "qemu-xhci,id=xhci")
+	return nil
+}
+
 func (q *QEMUCommandBuilder) AddSerial(c Chardev) {
 	var builder strings.Builder
 	builder.WriteString(fmt.Sprintf("stdio,id=%s", c.ID))
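
A minimal sketch of how a caller might combine the two new builder helpers above. The import path and the use of a zero-value QEMUCommandBuilder are assumptions made purely for illustration; in this stripped-down form Build() will still report the missing kernel/initrd settings.

package main

import (
	"fmt"

	"go.fuchsia.dev/fuchsia/tools/qemu" // import path is an assumption for this sketch
)

func main() {
	// A zero-value builder is assumed usable here purely for illustration.
	var b qemu.QEMUCommandBuilder
	// Emits "-drive if=none,id=usb,file=/usbdrive,format=raw" and
	// "-device usb-storage,drive=usb".
	b.AddUSBDrive(qemu.Drive{ID: "usb", File: "/usbdrive"})
	// Emits "-device qemu-xhci,id=xhci"; any HCI other than XHCI is rejected.
	if err := b.AddHCI(qemu.XHCI); err != nil {
		fmt.Println("AddHCI:", err)
		return
	}
	cmd, err := b.Build()
	fmt.Println(cmd, err)
}

In the package's own test below, the builder already has a kernel, initrd, and other devices configured before these calls, which is why the expected command lines there include those flags as well.
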
diff --git a/tools/qemu/qemu_test.go b/tools/qemu/qemu_test.go
index c069dce..0b0c17c 100644
--- a/tools/qemu/qemu_test.go
+++ b/tools/qemu/qemu_test.go
@@ -62,6 +62,11 @@
 		err: fmt.Errorf("QEMU initrd path must be set."),
 	}, cmd, err)
 
+	// Invalid HCI
+	if err := b.AddHCI("invalid"); err == nil {
+		t.Errorf("AddHCI(invalid) got nil but wanted an error")
+	}
+
 	b.SetInitrd("./data/zircon-a")
 
 	cmd, err = b.Build()
@@ -199,4 +204,58 @@
 			"-append", "kernel.serial=legacy infra.foo=bar"},
 		err: nil,
 	}, cmd, err)
+
+	b.AddUSBDrive(Drive{
+		ID:   "usb",
+		File: "/usbdrive",
+		Addr: "2.0",
+	})
+
+	cmd, err = b.Build()
+	check(t, expected{
+		cmd: []string{
+			"./bin/qemu",
+			"-kernel", "./data/qemu-kernel",
+			"-initrd", "./data/zircon-a",
+			"-machine", "virt-2.12,gic-version=host",
+			"-cpu", "host",
+			"-enable-kvm",
+			"-m", "4096",
+			"-smp", "4",
+			"-object", "iothread,id=iothread-otherdisk",
+			"-drive", "id=otherdisk,file=./data/otherdisk,format=raw,if=none,cache=unsafe,aio=threads",
+			"-device", "virtio-blk-pci,drive=otherdisk,iothread=iothread-otherdisk,addr=04.2",
+			"-chardev", "stdio,id=char0,logfile=logfile.txt,signal=off",
+			"-serial", "chardev:char0",
+			"-netdev", "user,id=net0",
+			"-device", "virtio-net-pci,netdev=net0,mac=52:54:00:63:5e:7a",
+			"-drive", "if=none,id=usb,file=/usbdrive,format=raw",
+			"-device", "usb-storage,drive=usb",
+			"-append", "kernel.serial=legacy infra.foo=bar"},
+	}, cmd, err)
+
+	b.AddHCI(XHCI)
+	cmd, err = b.Build()
+	check(t, expected{
+		cmd: []string{
+			"./bin/qemu",
+			"-kernel", "./data/qemu-kernel",
+			"-initrd", "./data/zircon-a",
+			"-machine", "virt-2.12,gic-version=host",
+			"-cpu", "host",
+			"-enable-kvm",
+			"-m", "4096",
+			"-smp", "4",
+			"-object", "iothread,id=iothread-otherdisk",
+			"-drive", "id=otherdisk,file=./data/otherdisk,format=raw,if=none,cache=unsafe,aio=threads",
+			"-device", "virtio-blk-pci,drive=otherdisk,iothread=iothread-otherdisk,addr=04.2",
+			"-chardev", "stdio,id=char0,logfile=logfile.txt,signal=off",
+			"-serial", "chardev:char0",
+			"-netdev", "user,id=net0",
+			"-device", "virtio-net-pci,netdev=net0,mac=52:54:00:63:5e:7a",
+			"-drive", "if=none,id=usb,file=/usbdrive,format=raw",
+			"-device", "usb-storage,drive=usb",
+			"-device", "qemu-xhci,id=xhci",
+			"-append", "kernel.serial=legacy infra.foo=bar"},
+	}, cmd, err)
 }
diff --git a/tools/zedmon/client/src/lib.rs b/tools/zedmon/client/src/lib.rs
index b1797bb..a2bf9b3 100644
--- a/tools/zedmon/client/src/lib.rs
+++ b/tools/zedmon/client/src/lib.rs
@@ -77,6 +77,121 @@
     power: f32,
 }
 
+struct DownsamplerState {
+    last_output_micros: u64,
+    prev_record: ZedmonRecord,
+    bus_voltage_integral: f32,
+    shunt_voltage_integral: f32,
+    power_integral: f32,
+}
+
+/// Helper struct for downsampling Zedmon data.
+struct Downsampler {
+    interval_micros: u64,
+    state: Option<DownsamplerState>,
+}
+
+impl Downsampler {
+    fn new(interval_micros: u64) -> Self {
+        Self { interval_micros, state: None }
+    }
+
+    /// Process a new record. If `record` completes a resampling interval, returns a ZedmonRecord
+    /// containing values averaged over the interval. Otherwise, returns None.
+    ///
+    /// The average value of y(t) over the interval [t_low, t_high] is computed using the definition
+    ///     avg(y(t), [t_low, t_high]) := 1 / (t_high - t_low) * \int_{t_low}^{t_high} y(s) ds.
+    /// As each record is processed, the integral is updated over the previous time interval using
+    /// trapezoid rule integration, i.e.
+    ///     \int_{t_1}^{t_2} y(s) ds \approx (t_2 - t_1) * (y(t_1) + y(t_2)) / 2.
+    fn process(&mut self, record: ZedmonRecord) -> Option<ZedmonRecord> {
+        // Initialize self.state if it is unset; otherwise grab a reference to it.
+        let state = match self.state.as_mut() {
+            None => {
+                self.state = Some(DownsamplerState {
+                    last_output_micros: record.timestamp_micros,
+                    prev_record: record,
+                    bus_voltage_integral: 0.0,
+                    shunt_voltage_integral: 0.0,
+                    power_integral: 0.0,
+                });
+                return None;
+            }
+            Some(state) => state,
+        };
+
+        // If the latest raw data interval spans multiple output samples, skip output and
+        // reinitialize.
+        if record.timestamp_micros >= state.last_output_micros + 2 * self.interval_micros {
+            eprintln!(
+                "Raw data interval [{}, {}] contains multiple downsampling output times. Skipping \
+                output and reinitializing downsampling.",
+                state.last_output_micros, record.timestamp_micros
+            );
+            self.state = None;
+            return self.process(record);
+        }
+
+        let output_micros = state.last_output_micros + self.interval_micros;
+        let t1 = state.prev_record.timestamp_micros as f32;
+        let t2 = record.timestamp_micros as f32;
+
+        // No output this cycle -- update the state and return None.
+        if record.timestamp_micros < output_micros {
+            let dt_half = (t2 - t1) / 2.0;
+            state.shunt_voltage_integral +=
+                dt_half * (state.prev_record.shunt_voltage + record.shunt_voltage);
+            state.bus_voltage_integral +=
+                dt_half * (state.prev_record.bus_voltage + record.bus_voltage);
+            state.power_integral += dt_half * (state.prev_record.power + record.power);
+
+            state.prev_record = record;
+            return None;
+        }
+
+        let t_out = output_micros as f32;
+        let dt_half = (t_out - t1) / 2.0;
+
+        // Use linear interpolation to estimate the instantaneous value of each measured quantity at
+        // t_out.
+        let interpolate = |y1, y2| y1 + (t_out - t1) / (t2 - t1) * (y2 - y1);
+        let shunt_voltage_interp =
+            interpolate(state.prev_record.shunt_voltage, record.shunt_voltage);
+        let bus_voltage_interp = interpolate(state.prev_record.bus_voltage, record.bus_voltage);
+        let power_interp = interpolate(state.prev_record.power, record.power);
+
+        // For each measured quantity, use the previous value and the interpolated value to update
+        // its integral over [t1, t_out].
+        state.shunt_voltage_integral +=
+            dt_half * (state.prev_record.shunt_voltage + shunt_voltage_interp);
+        state.bus_voltage_integral +=
+            dt_half * (state.prev_record.bus_voltage + bus_voltage_interp);
+        state.power_integral += dt_half * (state.prev_record.power + power_interp);
+
+        // Divide each integral by the total length of the integration interval to get an average
+        // value of the measured quantity. This populates the output record.
+        let record_out = ZedmonRecord {
+            timestamp_micros: output_micros,
+            shunt_voltage: state.shunt_voltage_integral / (self.interval_micros as f32),
+            bus_voltage: state.bus_voltage_integral / (self.interval_micros as f32),
+            power: state.power_integral / (self.interval_micros as f32),
+        };
+
+        // Now integrate over [t_out, t2] to seed the integrals for the next output interval. In the
+        // edge case that t2 is at the reporting interval boundary, t2 - t_out == 0.0 and the
+        // integrals will be appropriately set to 0.0.
+        let dt_half = (t2 - t_out) / 2.0;
+        state.shunt_voltage_integral = dt_half * (shunt_voltage_interp + record.shunt_voltage);
+        state.bus_voltage_integral = dt_half * (bus_voltage_interp + record.bus_voltage);
+        state.power_integral = dt_half * (power_interp + record.power);
+
+        // Update remaining state and return.
+        state.last_output_micros = output_micros;
+        state.prev_record = record;
+        Some(record_out)
+    }
+}
+
 /// Interface to a Zedmon device.
 #[derive(Debug)]
 pub struct ZedmonClient<InterfaceType>
@@ -296,10 +411,12 @@
         shunt_resistance: f32,
         v_shunt_index: usize,
         v_bus_index: usize,
+        reporting_interval_micros: Option<u64>,
     ) -> std::thread::JoinHandle<Result<(), Error>> {
         std::thread::spawn(move || {
             // The CSV header is suppressed. Clients may query it by using `describe`.
             let mut writer = csv::WriterBuilder::new().has_headers(false).from_writer(writer);
+            let mut downsampler = reporting_interval_micros.map(|r| Downsampler::new(r));
 
             for buffer in packet_receiver.iter() {
                 let reports = parser.parse_reports(&buffer)?;
@@ -321,7 +438,22 @@
                         bus_voltage,
                         power: bus_voltage * shunt_voltage / shunt_resistance,
                     };
-                    writer.serialize(record)?;
+
+                    // Explicit flushing is performed when downsampling because typically the
+                    // reporting rate is relatively low, and the user may want to see the output in
+                    // real time. Meanwhile, if not downsampling, there's sufficient output that
+                    // automatic flushing is fairly frequent. This behavior could be tuned if the
+                    // need arises.
+                    match downsampler.as_mut() {
+                        Some(downsampler) => match downsampler.process(record) {
+                            Some(r) => {
+                                writer.serialize(r)?;
+                                writer.flush()?;
+                            }
+                            None => {}
+                        },
+                        None => writer.serialize(record)?,
+                    }
 
                     if stopper.should_stop(report.timestamp_micros)? {
                         writer.flush()?;
@@ -388,6 +520,7 @@
         &self,
         writer: Box<dyn Write + Send>,
         stopper: impl StopSignal + Send + 'static,
+        reporting_interval: Option<Duration>,
     ) -> Result<(), Error> {
         // This function's workload is shared between its main thread and processing_thread.
         //
@@ -411,6 +544,7 @@
             self.shunt_resistance,
             self.v_shunt_index,
             self.v_bus_index,
+            reporting_interval.map(|d| d.as_micros() as u64),
         );
 
         let report_io_result = self.run_report_io(packet_sender);
@@ -989,6 +1123,7 @@
     fn run_zedmon_reporting<InterfaceType: usb_bulk::Open<InterfaceType> + Read + Write>(
         zedmon: &ZedmonClient<InterfaceType>,
         test_duration: Duration,
+        reporting_interval: Option<Duration>,
     ) -> Result<Vec<u8>, Error> {
         // Implements Write by sending bytes over a channel. The holder of the channel's
         // Receiver can then inspect the data that was written to test expectations.
@@ -1013,7 +1148,7 @@
 
         let (sender, receiver) = mpsc::channel();
         let writer = Box::new(ChannelWriter { sender, buffer: Vec::new() });
-        zedmon.read_reports(writer, DurationStopper::new(test_duration))?;
+        zedmon.read_reports(writer, DurationStopper::new(test_duration), reporting_interval)?;
 
         let mut output = Vec::new();
         while let Ok(mut buffer) = receiver.recv() {
@@ -1210,7 +1345,7 @@
 
             let zedmon = ZedmonClient::new(interface).expect("Error building ZedmonClient");
 
-            run_zedmon_reporting(&zedmon, test_duration)
+            run_zedmon_reporting(&zedmon, test_duration, None)
         };
 
         let max_failures = ZedmonClient::<TransientFailureInterface>::num_usb_read_retries();
@@ -1248,20 +1383,18 @@
             v_bus_scale: 0.025,
         };
         let test_duration = Duration::from_secs(10);
-        let reporting_interval = Duration::from_millis(1);
+        let raw_data_interval = Duration::from_millis(1);
 
         let report_queue =
-            make_report_queue(get_voltages, &device_config, test_duration, reporting_interval);
+            make_report_queue(get_voltages, &device_config, test_duration, raw_data_interval);
 
         let coordinator = fake_device::CoordinatorBuilder::new(device_config.clone())
             .with_report_queue(report_queue.clone())
             .build();
         let interface = fake_device::FakeZedmonInterface::new(coordinator);
-
         let zedmon = ZedmonClient::new(interface).expect("Error building ZedmonClient");
 
-        let output = run_zedmon_reporting(&zedmon, test_duration)?;
-
+        let output = run_zedmon_reporting(&zedmon, test_duration, None)?;
         let mut reader =
             csv::ReaderBuilder::new().has_headers(false).from_reader(output.as_slice());
 
@@ -1281,7 +1414,84 @@
             );
         }
 
-        assert_eq!(num_records, test_duration.as_millis() / reporting_interval.as_millis() + 1);
+        assert_eq!(num_records, test_duration.as_millis() / raw_data_interval.as_millis() + 1);
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_read_reports_downsampled() -> Result<(), Error> {
+        // The voltages and device config are all completely made up for mathematical convenience.
+        // They are chosen so that v_shunt, v_bus, and power are easy to integrate analytically over
+        // the downsampling intervals.
+        fn get_voltages(micros: u64) -> (f32, f32) {
+            let seconds = micros as f32 / 1e6;
+            let v_shunt = seconds;
+            let v_bus = seconds.powi(2);
+            (v_shunt, v_bus)
+        }
+
+        let device_config = fake_device::DeviceConfiguration {
+            shunt_resistance: 2.0,
+            v_shunt_scale: 1e-4,
+            v_bus_scale: 1e-4,
+        };
+
+        let test_duration = Duration::from_secs(1);
+        let raw_data_interval = Duration::from_millis(1);
+        let reporting_interval = Duration::from_millis(100);
+
+        // Interval-average formulas follow from the definition
+        //   avg(y(t), [t_low, t_high]) := 1 / (t_high - t_low) * \int_{t_low}^{t_high} y(s) ds.
+        // Input times are in seconds.
+        let v_shunt_integral = |t1: f32, t2: f32| (t2.powi(2) - t1.powi(2)) / (2.0 * (t2 - t1));
+        let v_bus_integral = |t1: f32, t2: f32| (t2.powi(3) - t1.powi(3)) / (3.0 * (t2 - t1));
+        let power_integral = |t1: f32, t2: f32| {
+            (t2.powi(4) - t1.powi(4)) / (4.0 * device_config.shunt_resistance * (t2 - t1))
+        };
+
+        let report_queue =
+            make_report_queue(get_voltages, &device_config, test_duration, raw_data_interval);
+
+        let coordinator = fake_device::CoordinatorBuilder::new(device_config.clone())
+            .with_report_queue(report_queue.clone())
+            .build();
+        let interface = fake_device::FakeZedmonInterface::new(coordinator);
+        let zedmon = ZedmonClient::new(interface).expect("Error building ZedmonClient");
+
+        let output = run_zedmon_reporting(&zedmon, test_duration, Some(reporting_interval))?;
+        let mut reader =
+            csv::ReaderBuilder::new().has_headers(false).from_reader(output.as_slice());
+
+        // Both v_shunt and v_bus have a max value of 1. Since
+        //     power = v_shunt * v_bus / shunt_resistance,
+        // the maximum error in a raw power measurement should be roughly
+        //     (v_shunt_scale + v_bus_scale) / shunt_resistance.
+        let power_tolerance = (device_config.v_shunt_scale + device_config.v_bus_scale)
+            / device_config.shunt_resistance;
+
+        let mut num_records = 0;
+        let mut prev_timestamp = 0;
+        for result in reader.deserialize::<ZedmonRecord>() {
+            let record = result?;
+
+            num_records = num_records + 1;
+
+            let t = record.timestamp_micros as f32 / 1e6;
+            let t_prev = prev_timestamp as f32 / 1e6;
+
+            assert_near!(
+                record.shunt_voltage,
+                v_shunt_integral(t_prev, t),
+                device_config.v_shunt_scale
+            );
+            assert_near!(record.bus_voltage, v_bus_integral(t_prev, t), device_config.v_bus_scale);
+            assert_near!(record.power, power_integral(t_prev, t), power_tolerance);
+
+            prev_timestamp = record.timestamp_micros;
+        }
+
+        assert_eq!(num_records, test_duration.as_millis() / reporting_interval.as_millis());
 
         Ok(())
     }
@@ -1338,4 +1548,105 @@
 
         Ok(())
     }
+
+    #[test]
+    fn test_downsampler_nominal() {
+        // Total duration of the test scenario.
+        let duration_micros = 1_000_000;
+
+        // Interval of the raw signal (100 Hz).
+        let raw_interval_micros = 10_000;
+
+        // Downsample to 80 Hz. This lets us test both when a downsampling output time coincides
+        // with a raw sample time (t=50,000us, 100,000us, ...) and when it falls strictly inside a
+        // raw sample interval (t=12,500us, 25,000us, 37,500us, ...).
+        let downsampling_interval_micros = 12_500;
+        let mut downsampler = Downsampler::new(downsampling_interval_micros);
+
+        // Functions defining the raw and average values for each quantity. These are made up, and
+        // there is no relationship between the voltages and power in Downsampler's context.
+        //
+        // Interval-average formulas follow from the definition
+        //   avg(y(t), [t_low, t_high]) := 1 / (t_high - t_low) * \int_{t_low}^{t_high} y(s) ds.
+        let shunt_voltage_raw = |t: f32| t;
+        let shunt_voltage_average =
+            |t1: f32, t2: f32| (t2.powi(2) - t1.powi(2)) / (2.0 * (t2 - t1));
+        let bus_voltage_raw = |t: f32| t.powi(2);
+        let bus_voltage_average = |t1: f32, t2: f32| (t2.powi(3) - t1.powi(3)) / (3.0 * (t2 - t1));
+        let power_raw = |t: f32| t.powi(3);
+        let power_average = |t1: f32, t2: f32| (t2.powi(4) - t1.powi(4)) / (4.0 * (t2 - t1));
+
+        // Collect raw samples from t=0s to t=1s.
+        let mut t_micros = 0;
+        let mut records_out = Vec::new();
+        while t_micros <= duration_micros {
+            let t_sec = t_micros as f32 / 1e6;
+            let record = ZedmonRecord {
+                timestamp_micros: t_micros,
+                shunt_voltage: shunt_voltage_raw(t_sec),
+                bus_voltage: bus_voltage_raw(t_sec),
+                power: power_raw(t_sec),
+            };
+            match downsampler.process(record) {
+                Some(r) => records_out.push(r),
+                None => {}
+            }
+
+            t_micros += raw_interval_micros;
+        }
+
+        let dt_sec = downsampling_interval_micros as f32 / 1e6;
+
+        // Confirm expectations.
+        assert_eq!(records_out.len(), (duration_micros / downsampling_interval_micros) as usize);
+
+        for (i, record) in records_out.into_iter().enumerate() {
+            let expected_micros = ((i + 1) as u64) * downsampling_interval_micros;
+
+            // Endpoints of the interval for this sample, in seconds.
+            let t2 = expected_micros as f32 / 1e6;
+            let t1 = t2 - dt_sec;
+
+            assert_eq!(record.timestamp_micros, expected_micros);
+            assert_near!(record.shunt_voltage, shunt_voltage_average(t1, t2), 1e-4);
+            assert_near!(record.bus_voltage, bus_voltage_average(t1, t2), 1e-4);
+            assert_near!(record.power, power_average(t1, t2), 1e-4);
+        }
+    }
+
+    #[test]
+    fn test_downsampler_with_data_gap() {
+        // Run for 1 second with a raw data interval of 10ms and a downsampling interval of 100ms,
+        // with an outage in raw data from 230ms to 440ms, inclusive. Downsampling will skip outputs
+        // at 300ms and 400ms and reinitialize at 450ms, so we should see a gap in timestamps of the
+        // output samples between 200ms and 550ms.
+        let duration_micros = 1_000_000;
+        let raw_interval_micros = 10_000;
+        let downsampling_interval_micros = 100_000;
+        let raw_data_gap_micros = [230_000, 440_000];
+
+        let mut downsampler = Downsampler::new(downsampling_interval_micros);
+
+        let mut t_micros = 0;
+        let mut records_out = Vec::new();
+        while t_micros <= duration_micros {
+            if t_micros < raw_data_gap_micros[0] || t_micros > raw_data_gap_micros[1] {
+                let record = ZedmonRecord {
+                    timestamp_micros: t_micros,
+                    shunt_voltage: 1.0,
+                    bus_voltage: 1.0,
+                    power: 1.0,
+                };
+                match downsampler.process(record) {
+                    Some(r) => records_out.push(r),
+                    None => {}
+                }
+            }
+
+            t_micros += raw_interval_micros;
+        }
+
+        let timestamps: Vec<u64> = records_out.into_iter().map(|r| r.timestamp_micros).collect();
+        assert_eq!(timestamps, vec![100_000, 200_000, 550_000, 650_000, 750_000, 850_000, 950_000]);
+    }
 }
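
For reference, a worked instance of the averaging that Downsampler::process performs, using the definitions from its doc comment; the numbers are illustrative. Take y(t) = t, raw samples at t = 0 and t = 10000 us, and a reporting interval of 7500 us:

    y_interp(t_out) = y(t_1) + (t_out - t_1) / (t_2 - t_1) * (y(t_2) - y(t_1)) = 0 + 7500/10000 * 10000 = 7500
    \int_{0}^{7500} y(s) ds \approx 7500 * (0 + 7500) / 2 = 28,125,000
    avg = 28,125,000 / 7500 = 3750

which is exactly the mean of y(t) = t over [0, 7500]. The leftover trapezoid over [7500, 10000], approximately 2500 * (7500 + 10000) / 2 = 21,875,000, seeds the integrals for the next reporting interval, matching the closing lines of process().
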
diff --git a/tools/zedmon/client/src/main.rs b/tools/zedmon/client/src/main.rs
index f00c861..5d24245 100644
--- a/tools/zedmon/client/src/main.rs
+++ b/tools/zedmon/client/src/main.rs
@@ -20,6 +20,9 @@
 /// Describes allowable values for the --duration arg of `record`.
 const DURATION_REGEX: &'static str = r"^(\d+)(h|m|s|ms)$";
 
+const ZEDMON_NOMINAL_DATA_RATE_HZ: u32 = 1500;
+const ZEDMON_NOMINAL_DATA_INTERVAL_USEC: f32 = 1e6 / ZEDMON_NOMINAL_DATA_RATE_HZ as f32;
+
 /// Validates the --duration arg of `record`.
 fn validate_duration(value: String) -> Result<(), String> {
     let re = regex::Regex::new(DURATION_REGEX).unwrap();
@@ -46,6 +49,16 @@
     }
 }
 
+fn validate_downsampling_interval(value: String) -> Result<(), String> {
+    validate_duration(value.clone())?;
+    let interval = parse_duration(&value);
+    if interval.as_secs_f32() * 1e6 > ZEDMON_NOMINAL_DATA_INTERVAL_USEC {
+        Ok(())
+    } else {
+        Err(format!("Value must be greater than {}us", ZEDMON_NOMINAL_DATA_INTERVAL_USEC))
+    }
+}
+
 fn main() -> Result<(), Error> {
     let matches = App::new("zedmon")
         .about("Utility for interacting with Zedmon power measurement device")
@@ -74,19 +87,60 @@
                     .long("out")
                     .takes_value(true)
             ).arg(
+                Arg::with_name("average")
+                    .help(
+                        &format!(
+                            "Specifies that the client will output exactly one record, which \
+                            averages data over the specified duration. This is equivalent to \
+                            setting --duration and --interval to the same value. If specified, \
+                            must match the regular expression '{}'.",
+                            DURATION_REGEX)
+                        )
+                    .short("a")
+                    .long("average")
+                    .takes_value(true)
+                    .value_name("duration")
+                    .validator(&validate_duration)
+                    .conflicts_with_all(&["duration", "interval"]),
+            ).arg(
                 Arg::with_name("duration")
                     .help(
                         &format!(
                             "Duration of time on the Zedmon device to be spanned by data \
                             recording. If omitted, recording will continue until ENTER is pressed. \
-                            If specified, must match the regular expression {}",
+                            If specified, must match the regular expression '{}'.",
                             DURATION_REGEX)
                         )
                     .short("d")
                     .long("duration")
                     .takes_value(true)
-                    .validator(&validate_duration),
-            ),
+                    .validator(&validate_duration)
+                    .conflicts_with("average"),
+            ).arg(
+                Arg::with_name("interval")
+                    .help(
+                        &format!(
+                            "Interval at which to report data. Raw measurements from Zedmon will \
+                            be averaged at this interval. \
+                            \n  If --interval is omitted, each sample will be reported. If \
+                            specified, it must match the regular expression '{}'. It must also be \
+                            greater than {:.1}us, Zedmon's nominal reporting interval (corresponding
+                            to {} Hz). \
+                            \n  If a gap in raw data contains multiple downsampling output times, \
+                            then no samples will be emitted during the gap, and the downsampling \
+                            process will reinitialize with the end of the gap as its starting \
+                            point.",
+                            DURATION_REGEX,
+                            ZEDMON_NOMINAL_DATA_INTERVAL_USEC,
+                            ZEDMON_NOMINAL_DATA_RATE_HZ)
+                        )
+                    .short("i")
+                    .long("interval")
+                    .takes_value(true)
+                    .value_name("duration")
+                    .validator(&validate_downsampling_interval)
+                    .conflicts_with("average"),
+            )
         )
         .subcommand(
             SubCommand::with_name("relay").about("Enables/disables relay").arg(
@@ -178,6 +232,7 @@
 
 /// Runs the "record" subcommand".
 fn run_record(arg_matches: &ArgMatches<'_>) -> Result<(), Error> {
+    // Parse --out.
     let (output, dest_name): (Box<dyn Write + Send>, &str) = match arg_matches.value_of("out") {
         None => (Box::new(File::create("zedmon.csv")?), "zedmon.csv"),
         Some("-") => (Box::new(std::io::stdout()), "stdout"),
@@ -185,6 +240,18 @@
     };
     let dest_name = dest_name.to_string();
 
+    // Parse either --average or --duration and --interval.
+    let (duration, reporting_interval) = match arg_matches.value_of("average") {
+        Some(value) => {
+            let duration = parse_duration(value);
+            (Some(duration), Some(duration))
+        }
+        None => (
+            arg_matches.value_of("duration").map(parse_duration),
+            arg_matches.value_of("interval").map(parse_duration),
+        ),
+    };
+
     let zedmon = lib::zedmon();
 
     // TODO(fxbug.dev/61471): Consider incorporating the time offset directly into report
@@ -193,14 +260,13 @@
     println!("Time offset: {}ns ± {}ns\n", offset, uncertainty);
 
     println!("Recording to {}.", dest_name);
-    match arg_matches.value_of("duration") {
-        Some(value) => {
-            let duration = parse_duration(value);
-            zedmon.read_reports(output, lib::DurationStopper::new(duration))
+    match duration {
+        Some(duration) => {
+            zedmon.read_reports(output, lib::DurationStopper::new(duration), reporting_interval)
         }
         None => {
             println!("Press ENTER to stop.");
-            zedmon.read_reports(output, StdinStopper::new())
+            zedmon.read_reports(output, StdinStopper::new(), reporting_interval)
         }
     }
 }
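
The lower bound enforced by validate_downsampling_interval above follows directly from the constants added at the top of main.rs:

    ZEDMON_NOMINAL_DATA_INTERVAL_USEC = 1e6 / 1500 ≈ 666.7 us

Since DURATION_REGEX only admits whole numbers of ms, s, m, or h, every nonzero --interval value the regex accepts is at least 1 ms = 1000 us and clears this bound; only a zero duration (e.g. "0ms") can trip the check.
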
diff --git a/tools/zedmon/client/test/zedmon_client_manual_test.dart b/tools/zedmon/client/test/zedmon_client_manual_test.dart
index 330c299..b6fc3f7 100644
--- a/tools/zedmon/client/test/zedmon_client_manual_test.dart
+++ b/tools/zedmon/client/test/zedmon_client_manual_test.dart
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 
 import 'dart:async';
-import 'dart:convert' show jsonDecode, LineSplitter, utf8;
+import 'dart:convert' show jsonDecode;
 import 'dart:core';
 import 'dart:io' show Directory, File, Platform, Process, sleep;
 
@@ -55,57 +55,30 @@
       csvFormat.indexOf('power'));
 }
 
-void validateZedmonCsvLine(String csvLine, ZedmonDescription desc) {
+void validateZedmonCsvLine(
+    String csvLine, ZedmonDescription desc, double powerTolerance) {
   final parts = csvLine.split(',');
   final shuntVoltage = double.parse(parts[desc.shuntVoltageIndex]);
   final busVoltage = double.parse(parts[desc.busVoltageIndex]);
   final power = double.parse(parts[desc.powerIndex]);
 
-  expect(
-      busVoltage * shuntVoltage / desc.shuntResistance, closeTo(power, 1e-4));
+  expect(busVoltage * shuntVoltage / desc.shuntResistance,
+      closeTo(power, powerTolerance));
 }
 
-// Returns the average power measured by `zedmon record` over the specified
-// number of seconds.
+// Returns the average power measured by `zedmon record --average` for the
+// specified number of seconds.
 Future<double> measureAveragePower(String zedmonPath, String tempFilePath,
     ZedmonDescription desc, int seconds) async {
   final result = await Process.run(zedmonPath,
-      ['record', '--out', tempFilePath, '--duration', '${seconds}s']);
-  expect(result.exitCode, 0);
+      ['record', '--out', tempFilePath, '--average', '${seconds}s']);
+  expect(result.exitCode, equals(0));
 
-  bool initialized = false;
+  final lines = await File(tempFilePath).readAsLines();
+  expect(lines.length, equals(1));
 
-  int firstTimestampMicros = 0;
-  int prevTimestampMicros = 0;
-  double prevPower = 0.0;
-  double totalEnergy = 0.0;
-
-  await for (String line in File(tempFilePath)
-      .openRead()
-      .transform(utf8.decoder)
-      .transform(LineSplitter())) {
-    final parts = line.split(',');
-    final timestampMicros = int.parse(parts[desc.timestampIndex]);
-    final power = double.parse(parts[desc.powerIndex]);
-
-    if (initialized) {
-      final dt = timestampMicros - prevTimestampMicros;
-
-      // Use the trapezoid rule to estimate energy consumed since the previous
-      // sample.
-      totalEnergy += dt * (power + prevPower) / 2.0;
-
-      prevTimestampMicros = timestampMicros;
-      prevPower = power;
-    } else {
-      initialized = true;
-      firstTimestampMicros = timestampMicros;
-    }
-
-    prevTimestampMicros = timestampMicros;
-    prevPower = power;
-  }
-  return totalEnergy / (prevTimestampMicros - firstTimestampMicros);
+  final parts = lines[0].split(',');
+  return double.parse(parts[desc.powerIndex]);
 }
 
 // In order to run these tests, the host should be connected to exactly one
@@ -143,10 +116,10 @@
   // `zedmon list` should yield exactly one word, containing a serial number.
   test('zedmon list', () async {
     final result = await Process.run(zedmonPath, ['list']);
-    expect(result.exitCode, 0);
+    expect(result.exitCode, equals(0));
 
     final regex = RegExp(r'\W+');
-    expect(regex.allMatches(result.stdout).length, 1);
+    expect(regex.allMatches(result.stdout).length, equals(1));
   });
 
   // Records 1 second of Zedmon data and validates the power calculation for
@@ -154,31 +127,59 @@
   test('zedmon record', () async {
     final result = await Process.run(
         zedmonPath, ['record', '--out', tempFilePath, '--duration', '1s']);
-    expect(result.exitCode, 0);
+    expect(result.exitCode, equals(0));
 
-    var csvLinesRead = 0;
-    await for (String line in File(tempFilePath)
-        .openRead()
-        .transform(utf8.decoder)
-        .transform(LineSplitter())) {
-      validateZedmonCsvLine(line, zedmonDescription);
-      csvLinesRead += 1;
+    final lines = await File(tempFilePath).readAsLines();
+
+    // Zedmon's nominal output rate is about 1500 Hz. Expecting 1400 lines
+    // gives a bit of buffer for packet loss; see fxbug.dev/64161.
+    expect(lines.length, greaterThan(1400));
+
+    for (String line in lines) {
+      validateZedmonCsvLine(line, zedmonDescription, 1e-4);
     }
+  });
 
-    expect(csvLinesRead, greaterThan(1000));
+  test('zedmon record downsampled', () async {
+    final result = await Process.run(zedmonPath, [
+      'record',
+      '--out',
+      tempFilePath,
+      '--duration',
+      '1s',
+      '--interval',
+      '100ms'
+    ]);
+    expect(result.exitCode, equals(0));
+
+    final lines = await File(tempFilePath).readAsLines();
+
+    // We should see exactly 10 records, given a 100ms reporting interval over
+    // a 1s duration. (Zedmon's nominal reporting interval is ~667us, so we'd
+    // have to miss many consecutive packets before a downsampled packet is
+    // skipped, and that would indicate a problem worth investigating.)
+    expect(lines.length, equals(10));
+
+    for (String line in lines) {
+      // Power in each output record is averaged from the power derived from
+      // each sample rather than computed from average shunt voltage and
+      // average bus voltage. Consequently, the tolerance in the power calculation
+      // needs to be higher here.
+      validateZedmonCsvLine(line, zedmonDescription, 0.01);
+    }
   });
 
   // Tests that the 5-second average power drops by at least 99% when the
   // relay is turned off.
   test('zedmon relay', () async {
     var result = await Process.run(zedmonPath, ['relay', 'off']);
-    expect(result.exitCode, 0);
+    expect(result.exitCode, equals(0));
     sleep(Duration(seconds: 1));
     final offPower = await measureAveragePower(
         zedmonPath, tempFilePath, zedmonDescription, 5);
 
     result = await Process.run(zedmonPath, ['relay', 'on']);
-    expect(result.exitCode, 0);
+    expect(result.exitCode, equals(0));
     sleep(Duration(seconds: 1));
     final onPower = await measureAveragePower(
         zedmonPath, tempFilePath, zedmonDescription, 5);
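
A note on why the downsampled test above uses a 0.01 power tolerance while the raw-record test keeps 1e-4: validateZedmonCsvLine recomputes power from the averaged voltages in each CSV line, but the downsampled power column is the average of per-sample power, and over any window

    avg(v_shunt * v_bus) - avg(v_shunt) * avg(v_bus) = Cov(v_shunt, v_bus),

the time-averaged covariance of the two signals, which is generally nonzero over a 100 ms interval. The looser tolerance absorbs that gap.
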
diff --git a/zircon/kernel/phys/main.h b/zircon/kernel/phys/main.h
index 4462851..f6750574 100644
--- a/zircon/kernel/phys/main.h
+++ b/zircon/kernel/phys/main.h
@@ -43,7 +43,9 @@
 // executable defines.  It can use printf (and stdout generally) freely.
 [[noreturn]] void ZbiMain(void* zbi, arch::EarlyTicks) PHYS_SINGLETHREAD;
 
+// These are defined by the linker script.
 extern "C" __LOCAL const char PHYS_LOAD_ADDRESS[];  // Address this file was loaded into memory.
+extern "C" __LOCAL const char _end[];               // End of the image, including ".bss"
 
 // Apply any relocations to our binary.
 //
diff --git a/zircon/kernel/phys/symbolize.cc b/zircon/kernel/phys/symbolize.cc
index 913aae5..2b7480e5 100644
--- a/zircon/kernel/phys/symbolize.cc
+++ b/zircon/kernel/phys/symbolize.cc
@@ -47,7 +47,6 @@
 
 // These are defined by the linker script.
 extern "C" void __code_start();
-extern "C" char _end[];
 extern "C" const BuildIdNote __start_note_gnu_build_id[];
 extern "C" const BuildIdNote __stop_note_gnu_build_id[];
 
diff --git a/zircon/kernel/phys/test/BUILD.zircon.gn b/zircon/kernel/phys/test/BUILD.zircon.gn
index 2793c99..26dec91 100644
--- a/zircon/kernel/phys/test/BUILD.zircon.gn
+++ b/zircon/kernel/phys/test/BUILD.zircon.gn
@@ -224,6 +224,7 @@
       ":test-main",
       "$zx/kernel/lib/ktl",
       "$zx/kernel/lib/libc",
+      "$zx/kernel/phys/lib/memalloc",
       "$zx/system/ulib/zbitl",
     ]
   }
diff --git a/zircon/kernel/phys/test/phys-memory-test.cc b/zircon/kernel/phys/test/phys-memory-test.cc
index d301ee9..9cb388e 100644
--- a/zircon/kernel/phys/test/phys-memory-test.cc
+++ b/zircon/kernel/phys/test/phys-memory-test.cc
@@ -5,11 +5,15 @@
 // https://opensource.org/licenses/MIT
 
 #include <inttypes.h>
+#include <lib/memalloc.h>
 #include <lib/zbitl/items/mem_config.h>
+#include <lib/zx/status.h>
 #include <stdio.h>
 #include <string.h>
 #include <zircon/assert.h>
+#include <zircon/limits.h>
 
+#include <fbl/algorithm.h>
 #include <ktl/byte.h>
 #include <ktl/span.h>
 
@@ -19,6 +23,9 @@
 
 namespace {
 
+constexpr uint64_t kMiB [[maybe_unused]] = 1024 * 1024;
+constexpr uint64_t kGiB [[maybe_unused]] = 1024 * 1024 * 1024;
+
 // Convert a zbi_mem_range_t memory type into a human-readable string.
 const char* RangeTypeString(uint32_t type) {
   switch (type) {
@@ -33,6 +40,64 @@
   }
 }
 
+// Allocate and overwrite all RAM from the given memalloc::Allocator.
+//
+// Return the number of bytes that were in the allocator.
+uint64_t AllocateAndOverwriteFreeMemory(memalloc::Allocator* allocator) {
+  uint64_t bytes_allocated = 0;
+
+  // To avoid calling into the allocator too many times, we start with large
+  // allocations and gradually ask for less memory as the larger allocations
+  // fail.
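+  //
+  // For example: 1 MiB requests until they fail, then 512 KiB, 256 KiB, and
+  // so on, down to single-byte requests.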
+  uint64_t allocation_size = kMiB;  // start with 1MiB allocations.
+  while (allocation_size > 0) {
+    // Allocate some memory.
+    zx::status<uint64_t> result = allocator->Allocate(allocation_size);
+    if (result.is_error()) {
+      allocation_size /= 2;
+      continue;
+    }
+    bytes_allocated += allocation_size;
+
+    // Overwrite the memory.
+    //
+    // TODO(dgreenaway): We are currently running uncached on ARM64, which has
+    // a memcpy throughput of ~5MiB/s (!). We only overwrite a small amount of
+    // RAM to avoid the copy taking too long on systems with large amounts of RAM.
+    constexpr uint64_t kMaxOverwrite = 64 * kMiB;
+    auto* bytes = reinterpret_cast<std::byte*>(result.value());
+    if (bytes_allocated < kMaxOverwrite) {
+      memset(bytes, 0x33, static_cast<size_t>(allocation_size));
+    }
+  }
+
+  return bytes_allocated;
+}
+
+// Remove architecture-specific regions of memory.
+void ArchRemoveReservedRanges(memalloc::Allocator* allocator) {
+#if defined(__x86_64__)
+  // On x86, remove space likely to be holding our page tables.
+  //
+  // TODO(dgreenaway): We assume here that the page tables are contiguously
+  // allocated, starting at CR3, and all fitting within 1MiB. We should remove
+  // these assumptions.
+  {
+    // Get top-level page directory location, stored in the CR3 register.
+    uint64_t cr3;
+    __asm__("mov %%cr3, %0\n\t" : "=r"(cr3));
+
+    // Remove the range.
+    zx::status<> result = allocator->RemoveRange(cr3, 1 * kMiB);
+    ZX_ASSERT(result.is_ok());
+  }
+
+  // On x86-64, remove space unlikely to be mapped into our address space (anything past 1 GiB).
+  zx::status<> result = allocator->RemoveRange(1 * kGiB, UINT64_MAX - 1 * kGiB + 1);
+  ZX_ASSERT(result.is_ok());
+#endif
+}
+
 }  // namespace
 
 int TestMain(void* zbi_ptr, arch::EarlyTicks ticks) {
@@ -44,10 +109,10 @@
     printf("No ZBI found. Skipping test...\n");
     return 0;
   }
+  zbitl::View<zbitl::ByteView> view({static_cast<ktl::byte*>(zbi_ptr), SIZE_MAX});
 
   // Print memory information.
-  zbitl::MemRangeTable container{
-      zbitl::View<zbitl::ByteView>({static_cast<ktl::byte*>(zbi_ptr), ktl::dynamic_extent})};
+  zbitl::MemRangeTable container{view};
   printf("Memory ranges detected:\n");
   size_t count = 0;
   for (const auto& range : container) {
@@ -72,5 +137,57 @@
     return 1;
   }
 
+  // Add all memory claimed to be free to the allocator.
+  constexpr size_t kMaxRanges = 32;
+  memalloc::Range ranges[kMaxRanges];
+  static_assert(sizeof(ranges) <= 1024, "`ranges` too large for stack.");
+  memalloc::Allocator allocator(ranges);
+  for (const auto& range : container) {
+    // Ignore reserved memory on our first pass.
+    if (range.type != ZBI_MEM_RANGE_RAM) {
+      continue;
+    }
+    zx::status<> result = allocator.AddRange(range.paddr, range.length);
+    ZX_ASSERT(result.is_ok());
+  }
+  ZX_ASSERT(container.take_error().is_ok());
+
+  // Remove any memory region marked as reserved.
+  for (const auto& range : container) {
+    if (range.type != ZBI_MEM_RANGE_RESERVED) {
+      continue;
+    }
+    zx::status<> result = allocator.RemoveRange(range.paddr, range.length);
+    ZX_ASSERT(result.is_ok());
+  }
+  ZX_ASSERT(container.take_error().is_ok());
+
+  // Remove our code from the range of useable memory.
+  auto start = reinterpret_cast<uint64_t>(&PHYS_LOAD_ADDRESS);
+  auto end = reinterpret_cast<uint64_t>(&_end);
+  ZX_ASSERT(allocator.RemoveRange(start, /*size=*/end - start).is_ok());
+
+  // Remove space occupied by the ZBI.
+  zx::status<> result =
+      allocator.RemoveRange(reinterpret_cast<uint64_t>(view.storage().data()), view.size_bytes());
+  ZX_ASSERT(result.is_ok());
+
+  // Remove any arch-specific reserved ranges.
+  ArchRemoveReservedRanges(&allocator);
+
+  // Remove the zero byte, to avoid confusion with nullptr.
+  result = allocator.RemoveRange(0, 1);
+  ZX_ASSERT(result.is_ok());
+
+  // Ensure we can allocate all the remaining RAM and overwrite it.
+  uint64_t bytes_allocated = AllocateAndOverwriteFreeMemory(&allocator);
+
+  // Print the number of bytes allocated, and ensure we found at least 1 byte of free memory.
+  printf("Detected %10" PRIu64 " kiB of free memory.",
+         static_cast<uint64_t>(bytes_allocated / 1024));
+  if (bytes_allocated == 0) {
+    return 1;
+  }
+
   return 0;
 }
diff --git a/zircon/public/sysroot/BUILD.gn b/zircon/public/sysroot/BUILD.gn
index 0f1c101..84dc482 100644
--- a/zircon/public/sysroot/BUILD.gn
+++ b/zircon/public/sysroot/BUILD.gn
@@ -254,6 +254,9 @@
     # The parent group() that depends on everything else.
     group(target_name) {
       deps = _sysroot_deps
+      if (defined(invoker.deps)) {
+        deps += invoker.deps
+      }
     }
   }
 
@@ -268,6 +271,7 @@
   #
   _create_sysroot("cpp_binary_deps") {
     sysroot_dir = cpp_sysroot_dir
+    deps = [ "//zircon/system/ulib/c" ]
   }
 
   # Rust compilation can now use the C++ sysroot directly, so just
@@ -287,6 +291,7 @@
     sysroot_dir = go_sysroot_dir
     add_crt1 = true
     absolute_stub_paths = true
+    deps = [ "//zircon/system/ulib/c" ]
   }
 
   # Depend on this to only require the Zircon headers, but not link to the
diff --git a/zircon/system/LICENSE b/zircon/system/LICENSE
index 1353170..7099f4b 100644
--- a/zircon/system/LICENSE
+++ b/zircon/system/LICENSE
@@ -10,9 +10,6 @@
      copyright notice, this list of conditions and the following
      disclaimer in the documentation and/or other materials provided
      with the distribution.
-   * Neither the name of Google Inc. nor the names of its
-     contributors may be used to endorse or promote products derived
-     from this software without specific prior written permission.
 
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
diff --git a/zircon/system/ulib/c/libc_toolchain.gni b/zircon/system/ulib/c/libc_toolchain.gni
index b475675..4515e20 100644
--- a/zircon/system/ulib/c/libc_toolchain.gni
+++ b/zircon/system/ulib/c/libc_toolchain.gni
@@ -121,16 +121,30 @@
 
 _system_target_variant = _uninstrumented_target_variant
 
-# This is the toolchain used to build the C library for regular Fuchsia packages.
-# If an instrumented toolchain is used, it will be installed as lib/<variant>/ld.so.1
-# as expected by other user-level binaries.
-sysroot_libc_toolchain = sysroot_libc_base_toolchain + _user_target_variant
-
 # This is the toolchain used to build the C library that appears in the system package,
 # i.e. that is installed under /lib/ld.so.1. It must be a non-instrumented version of
 # the library, though a variant like "gcc" might be applied to it.
 system_libc_toolchain = sysroot_libc_base_toolchain + _system_target_variant
 
+if (is_fuchsia) {
+  # When in a toolchain variant of the Fuchsia base toolchain, find the
+  # corresponding toolchain variant of user.libc_$target_cpu to build
+  # the C library with it.
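+  #
+  # For example (illustrative values), a variant suffix of "-asan-fuzzer"
+  # selects the "-asan" libc toolchain, since the "-fuzzer" part of the
+  # suffix is stripped below.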
+  sysroot_libc_toolchain = sysroot_libc_base_toolchain
+  if (toolchain_variant.suffix != "") {
+    sysroot_libc_toolchain =
+        sysroot_libc_base_toolchain +
+        string_replace(toolchain_variant.suffix, "-fuzzer", "")
+  } else {
+    sysroot_libc_toolchain = system_libc_toolchain
+  }
+} else {
+  # This is the toolchain used to build the C library for regular Fuchsia packages.
+  # If an instrumented toolchain is used, it will be installed as lib/<variant>/ld.so.1
+  # as expected by other user-level binaries.
+  sysroot_libc_toolchain = sysroot_libc_base_toolchain + _user_target_variant
+}
+
 # The GN target that builds the C library for the current variant selection.
 sysroot_libc_target = "$sysroot_libc_label($sysroot_libc_toolchain)"
 
@@ -260,34 +274,4 @@
 #   do so for now. In the future, it might be possible to use the right
 #   instrumented C library at link time instead.
 #
-#   select_variant = []
-#     No variant is selected. Both libraries are the same binary, installed
-#     as lib/ld.so.1 everywhere,
-#
-# - The fact that the C library can be compiled with GCC, but that
-#   instrumented executables require a C library built in the same
-#   variant toolchain instance means that variant selection for the
-#   C library must happen in a slightly different way than for
-#   regular targets. Consider the following four cases:
-#
-#   select_variant = [ "gcc" ]
-#     The system C library is built with GCC, and installed as lib/ld.so.1
-#     everywhere. Note that the "gcc" selector only applies to zircon-specific
-#     toolchains (e.g. the ones used to build the C library, the Zircon VDSO,
-#     or the Zircon kernel), but not to the toolchain(s) used to build Fuchsia
-#     user binaries. This is intentional (since a lot of Fuchsia user code does
-#     not compile with GCC without errors or warnings-as-errors).
-#
-#   select_variant = [ "asan", "gcc" ]
-#     Variant selection stops at the first variant in the list that matches
-#     the current target. In this specific case, "asan" will always match
-#     for the C library and Fuchsia user binaries, and the "gcc" variant
-#     would be ignored. However, the system C library, which ignores
-#     instrumented variants, will be built with GCC nonetheless.
-#
-#   select_variant = [ "gcc", "asan" ]
-#     Similarly, the "gcc" selector applies to the C library target,
-#     but not to other Fuchsia user binaries. The system C library will
-#     also be built with GCC.
-#
 
diff --git a/zircon/system/ulib/fidl/llcpp_client_base.cc b/zircon/system/ulib/fidl/llcpp_client_base.cc
index fb186bc1..ae41e37 100644
--- a/zircon/system/ulib/fidl/llcpp_client_base.cc
+++ b/zircon/system/ulib/fidl/llcpp_client_base.cc
@@ -45,11 +45,9 @@
   do {
     do {
       context->txid_ = ++txid_base_ & kUserspaceTxidMask;  // txid must be within mask.
-    } while (!context->txid_);                             // txid must be non-zero.
-  } while (contexts_.find(context->txid_) != contexts_.end());
+    } while (unlikely(!context->txid_));                   // txid must be non-zero.
+  } while (unlikely(!contexts_.insert_or_find(context)));
 
-  // Insert the ResponseContext.
-  contexts_.insert(context);
   list_add_tail(&delete_list_, context);
 }
 
@@ -80,7 +78,7 @@
 std::optional<UnbindInfo> ClientBase::Dispatch(fidl_incoming_msg_t* msg) {
   auto* hdr = reinterpret_cast<fidl_message_header_t*>(msg->bytes);
 
-  if (hdr->ordinal == kFidlOrdinalEpitaph) {
+  if (unlikely(hdr->ordinal == kFidlOrdinalEpitaph)) {
     FidlHandleInfoCloseMany(msg->handles, msg->num_handles);
     if (hdr->txid != 0) {
       return UnbindInfo{UnbindInfo::kUnexpectedMessage, ZX_ERR_INVALID_ARGS};
@@ -93,10 +91,8 @@
     ResponseContext* context = nullptr;
     {
       std::scoped_lock lock(lock_);
-      auto it = contexts_.find(hdr->txid);
-      if (it != contexts_.end()) {
-        context = &(*it);
-        contexts_.erase(it);
+      context = contexts_.erase(hdr->txid);
+      if (likely(context != nullptr)) {
         list_delete(static_cast<list_node_t*>(context));
       } else {
         fprintf(stderr, "%s: Received response for unknown txid %u.\n", __func__, hdr->txid);
@@ -109,7 +105,7 @@
     zx_status_t status = fidl_decode_etc(context->type(), msg->bytes, msg->num_bytes, msg->handles,
                                          msg->num_handles, &error_message);
     fidl_trace(DidLLCPPDecode);
-    if (status != ZX_OK) {
+    if (unlikely(status != ZX_OK)) {
       context->OnError();
       return UnbindInfo{UnbindInfo::kDecodeError, status};
     }
@@ -133,7 +129,7 @@
   {
     std::scoped_lock lock(lock_);
     // Ensure that only one thread receives the channel.
-    if (!channel_)
+    if (unlikely(!channel_))
       return zx::channel();
     ephemeral_channel_ref = std::move(channel_);
   }
diff --git a/zircon/system/ulib/fs/directory_connection.cc b/zircon/system/ulib/fs/directory_connection.cc
index 1e17f5a..ddea7c1 100644
--- a/zircon/system/ulib/fs/directory_connection.cc
+++ b/zircon/system/ulib/fs/directory_connection.cc
@@ -216,7 +216,7 @@
     completer.Reply(ZX_ERR_BAD_HANDLE);
     return;
   }
-  dircookie_.Reset();
+  dircookie_ = VdirCookie();
   completer.Reply(ZX_OK);
 }
 
diff --git a/zircon/system/ulib/fs/include/fs/internal/directory_connection.h b/zircon/system/ulib/fs/include/fs/internal/directory_connection.h
index 1ac71c4..05ad7a6f 100644
--- a/zircon/system/ulib/fs/include/fs/internal/directory_connection.h
+++ b/zircon/system/ulib/fs/include/fs/internal/directory_connection.h
@@ -74,7 +74,7 @@
   void GetDevicePath(GetDevicePathCompleter::Sync& completer) final;
 
   // Directory cookie for readdir operations.
-  fs::vdircookie_t dircookie_{};
+  fs::VdirCookie dircookie_;
 };
 
 }  // namespace internal
diff --git a/zircon/system/ulib/fs/include/fs/lazy_dir.h b/zircon/system/ulib/fs/include/fs/lazy_dir.h
index c47f987..c055bf1 100644
--- a/zircon/system/ulib/fs/include/fs/lazy_dir.h
+++ b/zircon/system/ulib/fs/include/fs/lazy_dir.h
@@ -40,7 +40,7 @@
   zx_status_t GetAttributes(fs::VnodeAttributes* out_attr) final;
   // Read the directory contents. Note that cookie->p is used to denote
   // if the "." entry has been returned. All IDs other than 0 are valid.
-  zx_status_t Readdir(vdircookie_t* cookie, void* dirents, size_t len, size_t* out_actual) final;
+  zx_status_t Readdir(VdirCookie* cookie, void* dirents, size_t len, size_t* out_actual) final;
   zx_status_t Lookup(fbl::StringPiece name, fbl::RefPtr<fs::Vnode>* out_vnode) final;
   zx_status_t GetNodeInfoForProtocol(VnodeProtocol protocol, Rights rights,
                                      VnodeRepresentation* info) final;
diff --git a/zircon/system/ulib/fs/include/fs/pseudo_dir.h b/zircon/system/ulib/fs/include/fs/pseudo_dir.h
index dd654bc..2cfc19e 100644
--- a/zircon/system/ulib/fs/include/fs/pseudo_dir.h
+++ b/zircon/system/ulib/fs/include/fs/pseudo_dir.h
@@ -72,7 +72,7 @@
   zx_status_t Lookup(fbl::StringPiece name, fbl::RefPtr<fs::Vnode>* out) final;
   void Notify(fbl::StringPiece name, unsigned event) final;
   zx_status_t WatchDir(fs::Vfs* vfs, uint32_t mask, uint32_t options, zx::channel watcher) final;
-  zx_status_t Readdir(vdircookie_t* cookie, void* dirents, size_t len, size_t* out_actual) final;
+  zx_status_t Readdir(VdirCookie* cookie, void* dirents, size_t len, size_t* out_actual) final;
   zx_status_t GetNodeInfoForProtocol(VnodeProtocol protocol, Rights rights,
                                      VnodeRepresentation* info) final;
 
diff --git a/zircon/system/ulib/fs/include/fs/vfs.h b/zircon/system/ulib/fs/include/fs/vfs.h
index 4e516f4..d02e18d 100644
--- a/zircon/system/ulib/fs/include/fs/vfs.h
+++ b/zircon/system/ulib/fs/include/fs/vfs.h
@@ -54,109 +54,40 @@
 
 class Vnode;
 
-// A storage class for a vdircookie which is passed to Readdir.
-// Common vnode implementations may use this struct as scratch
-// space, or cast it to an alternative structure of the same
-// size (or smaller).
+// A storage class for a vdircookie which is passed to Readdir. Common vnode implementations may use
+// this struct as scratch space, or cast it to an alternative structure of the same size (or
+// smaller).
 //
-// TODO(smklein): To implement seekdir and telldir, the size
-// of this vdircookie may need to shrink to a 'long'.
-struct vdircookie_t {
-  void Reset() { memset(this, 0, sizeof(vdircookie_t)); }
-
-  uint64_t n;
-  void* p;
+// TODO(smklein): To implement seekdir and telldir, the size of this vdircookie may need to shrink
+// to a 'long'.
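+//
+// As an illustrative sketch only (|MyDirCookie| is hypothetical; memfs applies this pattern with
+// its own dircookie_t), an implementation might overlay a smaller struct and guard its size with a
+// static_assert:
+//
+//     struct MyDirCookie {
+//       uint64_t last_id;
+//     };
+//     static_assert(sizeof(MyDirCookie) <= sizeof(VdirCookie), "MyDirCookie too large");
+//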
+struct VdirCookie {
+  uint64_t n = 0;
+  void* p = nullptr;
 };
 
-// The Vfs object contains global per-filesystem state, which
-// may be valid across a collection of Vnodes.
+// The Vfs object contains global per-filesystem state, which may be valid across a collection of
+// Vnodes.
 //
 // The Vfs object must outlive the Vnodes which it serves.
 //
 // This class is thread-safe.
 class Vfs {
  public:
+  class OpenResult;
+
   Vfs();
+
+#ifdef __Fuchsia__
+  explicit Vfs(async_dispatcher_t* dispatcher);
+#endif  // __Fuchsia__
+
   virtual ~Vfs();
 
-  class OpenResult {
-   public:
-    // When this variant is active, the indicated error occurred.
-    using Error = zx_status_t;
-
-#ifdef __Fuchsia__
-    // When this variant is active, the path being opened contains a remote node.
-    // |path| is the remaining portion of the path yet to be traversed.
-    // The caller should forward the remainder of this open request to that vnode.
-    struct Remote {
-      fbl::RefPtr<Vnode> vnode;
-      fbl::StringPiece path;
-    };
-
-    // When this variant is active, the path being opened is a remote node itself.
-    // The caller should clone the connection associated with this vnode.
-    struct RemoteRoot {
-      fbl::RefPtr<Vnode> vnode;
-    };
-#endif  // __Fuchsia__
-
-    // When this variant is active, |Open| has successfully reached a vnode under
-    // this filesystem. |validated_options| contains options to be used on the new
-    // connection, potentially adjusted for posix-flag rights expansion.
-    struct Ok {
-      fbl::RefPtr<Vnode> vnode;
-      Vnode::ValidatedOptions validated_options;
-    };
-
-    // Forwards the constructor arguments into the underlying |std::variant|.
-    // This allows |OpenResult| to be constructed directly from one of the variants, e.g.
-    //
-    //     OpenResult r = OpenResult::Error{ZX_ERR_ACCESS_DENIED};
-    //
-    template <typename T>
-    OpenResult(T&& v) : variants_(std::forward<T>(v)) {}
-
-    // Applies the |visitor| function to the variant payload. It simply forwards the visitor into
-    // the underlying |std::variant|. Returns the return value of |visitor|.
-    // Refer to C++ documentation for |std::visit|.
-    template <class Visitor>
-    constexpr auto visit(Visitor&& visitor) -> decltype(visitor(std::declval<zx_status_t>())) {
-      return std::visit(std::forward<Visitor>(visitor), variants_);
-    }
-
-    Error& error() { return std::get<Error>(variants_); }
-
-    bool is_error() const { return std::holds_alternative<Error>(variants_); }
-
-#ifdef __Fuchsia__
-    Remote& remote() { return std::get<Remote>(variants_); }
-
-    bool is_remote() const { return std::holds_alternative<Remote>(variants_); }
-
-    RemoteRoot& remote_root() { return std::get<RemoteRoot>(variants_); }
-
-    bool is_remote_root() const { return std::holds_alternative<RemoteRoot>(variants_); }
-#endif  // __Fuchsia__
-
-    Ok& ok() { return std::get<Ok>(variants_); }
-
-    bool is_ok() const { return std::holds_alternative<Ok>(variants_); }
-
-   private:
-#ifdef __Fuchsia__
-    using Variants = std::variant<Error, Remote, RemoteRoot, Ok>;
-#else
-    using Variants = std::variant<Error, Ok>;
-#endif  // __Fuchsia__
-
-    Variants variants_ = {};
-  };
-
-  // Traverse the path to the target vnode, and create / open it using
-  // the underlying filesystem functions (lookup, create, open).
+  // Traverse the path to the target vnode, and create / open it using the underlying filesystem
+  // functions (lookup, create, open).
   //
-  // The return value will suggest the next action to take. Refer to the variants in
-  // |OpenResult| for more information.
+  // The return value will suggest the next action to take. Refer to the variants in |OpenResult|
+  // for more information.
   OpenResult Open(fbl::RefPtr<Vnode> vn, fbl::StringPiece path, VnodeConnectionOptions options,
                   Rights parent_rights, uint32_t mode) FS_TA_EXCLUDES(vfs_lock_);
   zx_status_t Unlink(fbl::RefPtr<Vnode> vn, fbl::StringPiece path) FS_TA_EXCLUDES(vfs_lock_);
@@ -168,16 +99,14 @@
   using ShutdownCallback = fit::callback<void(zx_status_t status)>;
   using CloseAllConnectionsForVnodeCallback = fit::callback<void()>;
 
-  // Unmounts the underlying filesystem. The result of shutdown is delivered via
-  // calling |closure|.
+  // Unmounts the underlying filesystem. The result of shutdown is delivered via calling |closure|.
   //
-  // |Shutdown| may be synchronous or asynchronous.
-  // The closure may be invoked before or after |Shutdown| returns.
+  // |Shutdown| may be synchronous or asynchronous. The closure may be invoked before or after
+  // |Shutdown| returns.
   virtual void Shutdown(ShutdownCallback closure) = 0;
 
-  // Identifies if the filesystem is in the process of terminating.
-  // May be checked by active connections, which, upon reading new
-  // port packets, should ignore them and close immediately.
+  // Identifies if the filesystem is in the process of terminating. May be checked by active
+  // connections, which, upon reading new port packets, should ignore them and close immediately.
   virtual bool IsTerminating() const = 0;
 
   void TokenDiscard(zx::event ios_token) FS_TA_EXCLUDES(vfs_lock_);
@@ -187,20 +116,18 @@
                    fbl::StringPiece newStr) FS_TA_EXCLUDES(vfs_lock_);
   zx_status_t Rename(zx::event token, fbl::RefPtr<Vnode> oldparent, fbl::StringPiece oldStr,
                      fbl::StringPiece newStr) FS_TA_EXCLUDES(vfs_lock_);
-  // Calls readdir on the Vnode while holding the vfs_lock, preventing path
-  // modification operations for the duration of the operation.
-  zx_status_t Readdir(Vnode* vn, vdircookie_t* cookie, void* dirents, size_t len,
-                      size_t* out_actual) FS_TA_EXCLUDES(vfs_lock_);
-
-  explicit Vfs(async_dispatcher_t* dispatcher);
+  // Calls readdir on the Vnode while holding the vfs_lock, preventing path modification operations
+  // for the duration of the operation.
+  zx_status_t Readdir(Vnode* vn, VdirCookie* cookie, void* dirents, size_t len, size_t* out_actual)
+      FS_TA_EXCLUDES(vfs_lock_);
 
   async_dispatcher_t* dispatcher() const { return dispatcher_; }
 
   void SetDispatcher(async_dispatcher_t* dispatcher);
 
-  // Begins serving VFS messages over the specified channel.
-  // If the vnode supports multiple protocols and the client requested more than one of them,
-  // it would use |Vnode::Negotiate| to tie-break and obtain the resulting protocol.
+  // Begins serving VFS messages over the specified channel. If the vnode supports multiple
+  // protocols and the client requested more than one of them, it would use |Vnode::Negotiate| to
+  // tie-break and obtain the resulting protocol.
   zx_status_t Serve(fbl::RefPtr<Vnode> vnode, zx::channel channel, VnodeConnectionOptions options)
       FS_TA_EXCLUDES(vfs_lock_);
 
@@ -209,12 +136,12 @@
   zx_status_t Serve(fbl::RefPtr<Vnode> vnode, zx::channel channel, Vnode::ValidatedOptions options)
       FS_TA_EXCLUDES(vfs_lock_);
 
-  // Called by a VFS connection when it is closed remotely.
-  // The VFS is now responsible for destroying the connection.
+  // Called by a VFS connection when it is closed remotely. The VFS is now responsible for
+  // destroying the connection.
   void OnConnectionClosedRemotely(internal::Connection* connection) FS_TA_EXCLUDES(vfs_lock_);
 
-  // Serves a Vnode over the specified channel (used for creating new filesystems);
-  // the Vnode must be a directory.
+  // Serves a Vnode over the specified channel (used for creating new filesystems); the Vnode must
+  // be a directory.
   zx_status_t ServeDirectory(fbl::RefPtr<Vnode> vn, zx::channel channel, Rights rights);
 
   // Convenience wrapper over |ServeDirectory| with maximum rights.
@@ -237,20 +164,19 @@
   // Unpin a handle to a remote filesystem from a vnode, if one exists.
   zx_status_t UninstallRemote(fbl::RefPtr<Vnode> vn, zx::channel* h) FS_TA_EXCLUDES(vfs_lock_);
 
-  // Forwards an open request to a remote handle.
-  // If the remote handle is closed (handing off returns ZX_ERR_PEER_CLOSED),
-  // it is automatically unmounted.
+  // Forwards an open request to a remote handle. If the remote handle is closed (handing off
+  // returns ZX_ERR_PEER_CLOSED), it is automatically unmounted.
   zx_status_t ForwardOpenRemote(fbl::RefPtr<Vnode> vn, zx::channel channel, fbl::StringPiece path,
                                 VnodeConnectionOptions options, uint32_t mode)
       FS_TA_EXCLUDES(vfs_lock_);
 
-  // Unpins all remote filesystems in the current filesystem, and waits for the
-  // response of each one with the provided deadline.
+  // Unpins all remote filesystems in the current filesystem, and waits for the response of each one
+  // with the provided deadline.
   zx_status_t UninstallAll(zx::time deadline) FS_TA_EXCLUDES(vfs_lock_);
 
-  // Shuts down a remote filesystem, by sending a |fuchsia.io/DirectoryAdmin.Unmount|
-  // request to the filesystem serving |handle| and awaits a response.
-  // |deadline| is the deadline for waiting for response.
+  // Shuts down a remote filesystem by sending a |fuchsia.io/DirectoryAdmin.Unmount| request to the
+  // filesystem serving |handle| and awaiting a response. |deadline| is the deadline for waiting for
+  // the response.
   static zx_status_t UnmountHandle(zx::channel handle, zx::time deadline);
 
   bool IsTokenAssociatedWithVnode(zx::event token) FS_TA_EXCLUDES(vfs_lock_);
@@ -261,9 +187,9 @@
   bool ReadonlyLocked() const FS_TA_REQUIRES(vfs_lock_) { return readonly_; }
 
  private:
-  // Starting at vnode |vn|, walk the tree described by the path string,
-  // until either there is only one path segment remaining in the string
-  // or we encounter a vnode that represents a remote filesystem
+  // Starting at vnode |vn|, walk the tree described by the path string, until either there is only
+  // one path segment remaining in the string or we encounter a vnode that represents a remote
+  // filesystem.
   //
   // On success,
   // |out| is the vnode at which we stopped searching.
@@ -276,9 +202,11 @@
       FS_TA_REQUIRES(vfs_lock_);
 
   // Attempt to create an entry with name |name| within the |vndir| directory.
+  //
   // - Upon success, returns a reference to the new vnode via |out_vn|, and return ZX_OK.
   // - Upon recoverable error (e.g. target already exists but |options| did not specify this to be
-  // fatal), attempt to lookup the vnode.
+  //   fatal), attempt to lookup the vnode.
+  //
   // In the above two cases, |did_create| will be updated to indicate if an entry was created.
   // Otherwise, a corresponding error code is returned.
   zx_status_t EnsureExists(fbl::RefPtr<Vnode> vndir, fbl::StringPiece name,
@@ -310,10 +238,9 @@
     fbl::RefPtr<Vnode> vn_;
   };
 
-  // The mount list is a global static variable, but it only uses
-  // constexpr constructors during initialization. As a consequence,
-  // the .init_array section of the compiled vfs-mount object file is
-  // empty; "remote_list" is a member of the bss section.
+  // The mount list is a global static variable, but it only uses constexpr constructors during
+  // initialization. As a consequence, the .init_array section of the compiled vfs-mount object file
+  // is empty; "remote_list" is a member of the bss section.
   MountNode::ListType remote_list_ FS_TA_GUARDED(vfs_lock_){};
 
   async_dispatcher_t* dispatcher_{};
@@ -322,11 +249,11 @@
   // A lock which should be used to protect lookup and walk operations
   mtx_t vfs_lock_{};
 
-  // Starts FIDL message dispatching on |channel|, at the same time
-  // starts to manage the lifetime of the connection.
+  // Starts FIDL message dispatching on |channel|, and at the same time starts to manage the
+  // lifetime of the connection.
   //
-  // Implementations must ensure |connection| continues to live on, until
-  // |UnregisterConnection| is called on the pointer to destroy it.
+  // Implementations must ensure |connection| continues to live on, until |UnregisterConnection| is
+  // called on the pointer to destroy it.
   virtual zx_status_t RegisterConnection(std::unique_ptr<internal::Connection> connection,
                                          zx::channel channel) = 0;
 
@@ -336,6 +263,75 @@
 #endif  // ifdef __Fuchsia__
 };
 
+class Vfs::OpenResult {
+ public:
+  // When this variant is active, the indicated error occurred.
+  using Error = zx_status_t;
+
+#ifdef __Fuchsia__
+  // When this variant is active, the path being opened contains a remote node. |path| is the
+  // remaining portion of the path yet to be traversed. The caller should forward the remainder of
+  // this open request to that vnode.
+  struct Remote {
+    fbl::RefPtr<Vnode> vnode;
+    fbl::StringPiece path;
+  };
+
+  // When this variant is active, the path being opened is a remote node itself. The caller should
+  // clone the connection associated with this vnode.
+  struct RemoteRoot {
+    fbl::RefPtr<Vnode> vnode;
+  };
+#endif  // __Fuchsia__
+
+  // When this variant is active, |Open| has successfully reached a vnode under this filesystem.
+  // |validated_options| contains options to be used on the new connection, potentially adjusted for
+  // posix-flag rights expansion.
+  struct Ok {
+    fbl::RefPtr<Vnode> vnode;
+    Vnode::ValidatedOptions validated_options;
+  };
+
+  // Forwards the constructor arguments into the underlying |std::variant|. This allows |OpenResult|
+  // to be constructed directly from one of the variants, e.g.
+  //
+  //     OpenResult r = OpenResult::Error{ZX_ERR_ACCESS_DENIED};
+  //
+  template <typename T>
+  OpenResult(T&& v) : variants_(std::forward<T>(v)) {}
+
+  // Applies the |visitor| function to the variant payload. It simply forwards the visitor into the
+  // underlying |std::variant|. Returns the return value of |visitor|. Refer to C++ documentation
+  // for |std::visit|.
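+  //
+  // As an illustrative sketch (|result| is a hypothetical OpenResult), a caller might dispatch on
+  // the active alternative with a generic lambda:
+  //
+  //     result.visit([](auto&& alternative) { /* handle Error, Ok, etc. */ });
+  //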
+  template <class Visitor>
+  constexpr auto visit(Visitor&& visitor) -> decltype(visitor(std::declval<zx_status_t>())) {
+    return std::visit(std::forward<Visitor>(visitor), variants_);
+  }
+
+  Ok& ok() { return std::get<Ok>(variants_); }
+  bool is_ok() const { return std::holds_alternative<Ok>(variants_); }
+
+  Error& error() { return std::get<Error>(variants_); }
+  bool is_error() const { return std::holds_alternative<Error>(variants_); }
+
+#ifdef __Fuchsia__
+  Remote& remote() { return std::get<Remote>(variants_); }
+  bool is_remote() const { return std::holds_alternative<Remote>(variants_); }
+
+  RemoteRoot& remote_root() { return std::get<RemoteRoot>(variants_); }
+  bool is_remote_root() const { return std::holds_alternative<RemoteRoot>(variants_); }
+#endif  // __Fuchsia__
+
+ private:
+#ifdef __Fuchsia__
+  using Variants = std::variant<Error, Remote, RemoteRoot, Ok>;
+#else
+  using Variants = std::variant<Error, Ok>;
+#endif  // __Fuchsia__
+
+  Variants variants_ = {};
+};
+
 }  // namespace fs
 
 #endif  // FS_VFS_H_
diff --git a/zircon/system/ulib/fs/include/fs/vnode.h b/zircon/system/ulib/fs/include/fs/vnode.h
index 5b0c9de..057350c 100644
--- a/zircon/system/ulib/fs/include/fs/vnode.h
+++ b/zircon/system/ulib/fs/include/fs/vnode.h
@@ -41,7 +41,7 @@
 namespace fs {
 
 class Vfs;
-struct vdircookie_t;
+struct VdirCookie;
 
 inline bool vfs_valid_name(fbl::StringPiece name) {
   return name.length() > 0 && name.length() <= NAME_MAX &&
@@ -270,11 +270,11 @@
   virtual void Sync(SyncCallback closure);
 
   // Read directory entries of vn, error if not a directory.
-  // FS-specific Cookie must be a buffer of vdircookie_t size or smaller.
+  // FS-specific Cookie must be a buffer of VdirCookie size or smaller.
   // Cookie must be zero'd before first call and will be used by
   // the readdir implementation to maintain state across calls.
   // To "rewind" and start from the beginning, cookie may be zero'd.
-  virtual zx_status_t Readdir(vdircookie_t* cookie, void* dirents, size_t len, size_t* out_actual);
+  virtual zx_status_t Readdir(VdirCookie* cookie, void* dirents, size_t len, size_t* out_actual);
 
   // METHODS FOR OPENED OR UNOPENED NODES
   //
diff --git a/zircon/system/ulib/fs/lazy_dir.cc b/zircon/system/ulib/fs/lazy_dir.cc
index 5a1302bc..27abacb 100644
--- a/zircon/system/ulib/fs/lazy_dir.cc
+++ b/zircon/system/ulib/fs/lazy_dir.cc
@@ -25,7 +25,7 @@
   return a_id < b_id ? -1 : 1;
 }
 
-bool DoDot(vdircookie_t* cookie) {
+bool DoDot(VdirCookie* cookie) {
   if (cookie->p == 0) {
     cookie->p = (void*)1;
     return true;
@@ -62,7 +62,7 @@
   return ZX_ERR_NOT_FOUND;
 }
 
-zx_status_t LazyDir::Readdir(vdircookie_t* cookie, void* dirents, size_t len, size_t* out_actual) {
+zx_status_t LazyDir::Readdir(VdirCookie* cookie, void* dirents, size_t len, size_t* out_actual) {
   LazyEntryVector entries;
   GetContents(&entries);
   qsort(entries.data(), entries.size(), sizeof(LazyEntry), CompareLazyDirPtrs);
diff --git a/zircon/system/ulib/fs/pseudo_dir.cc b/zircon/system/ulib/fs/pseudo_dir.cc
index 64b585f7..68f8e27 100644
--- a/zircon/system/ulib/fs/pseudo_dir.cc
+++ b/zircon/system/ulib/fs/pseudo_dir.cc
@@ -53,7 +53,7 @@
   return watcher_.WatchDir(vfs, this, mask, options, std::move(watcher));
 }
 
-zx_status_t PseudoDir::Readdir(vdircookie_t* cookie, void* data, size_t len, size_t* out_actual) {
+zx_status_t PseudoDir::Readdir(VdirCookie* cookie, void* data, size_t len, size_t* out_actual) {
   fs::DirentFiller df(data, len);
   zx_status_t r = 0;
   if (cookie->n < kDotId) {
diff --git a/zircon/system/ulib/fs/test/lazy_dir_tests.cc b/zircon/system/ulib/fs/test/lazy_dir_tests.cc
index 9fd16c5..8451ade 100644
--- a/zircon/system/ulib/fs/test/lazy_dir_tests.cc
+++ b/zircon/system/ulib/fs/test/lazy_dir_tests.cc
@@ -49,7 +49,7 @@
   TestLazyDirHelper test;
 
   {
-    fs::vdircookie_t cookie = {};
+    fs::VdirCookie cookie;
     uint8_t buffer[4096];
     size_t len;
 
@@ -61,7 +61,7 @@
 
   test.AddContent({1, "test"});
   {
-    fs::vdircookie_t cookie = {};
+    fs::VdirCookie cookie;
     uint8_t buffer[4096];
     size_t len;
 
@@ -81,7 +81,7 @@
   }
   test.AddContent({33, "aaaa"});
   {
-    fs::vdircookie_t cookie = {};
+    fs::VdirCookie cookie;
     uint8_t buffer[4096];
     size_t len;
 
@@ -100,7 +100,7 @@
   }
   {
     // Ensure manually setting cookie past entries excludes them, but leaves "."
-    fs::vdircookie_t cookie = {};
+    fs::VdirCookie cookie;
     cookie.n = 30;
     uint8_t buffer[4096];
     size_t len;
diff --git a/zircon/system/ulib/fs/test/pseudo_dir_tests.cc b/zircon/system/ulib/fs/test/pseudo_dir_tests.cc
index 4ce72ce..e8ffac0 100644
--- a/zircon/system/ulib/fs/test/pseudo_dir_tests.cc
+++ b/zircon/system/ulib/fs/test/pseudo_dir_tests.cc
@@ -57,7 +57,7 @@
 
   // readdir
   {
-    fs::vdircookie_t cookie = {};
+    fs::VdirCookie cookie;
     uint8_t buffer[4096];
     size_t length;
     EXPECT_EQ(dir->Readdir(&cookie, buffer, sizeof(buffer), &length), ZX_OK);
@@ -71,7 +71,7 @@
 
   // readdir with small buffer
   {
-    fs::vdircookie_t cookie = {};
+    fs::VdirCookie cookie;
     uint8_t buffer[2 * sizeof(vdirent) + 13];
     size_t length;
     EXPECT_EQ(dir->Readdir(&cookie, buffer, sizeof(buffer), &length), ZX_OK);
@@ -91,7 +91,7 @@
   // test removed entries do not appear in readdir or lookup
   dir->RemoveEntry("file1");
   {
-    fs::vdircookie_t cookie = {};
+    fs::VdirCookie cookie;
     uint8_t buffer[4096];
     size_t length;
     EXPECT_EQ(dir->Readdir(&cookie, buffer, sizeof(buffer), &length), ZX_OK);
@@ -108,7 +108,7 @@
 
   // readdir again
   {
-    fs::vdircookie_t cookie = {};
+    fs::VdirCookie cookie;
     uint8_t buffer[4096];
     size_t length;
     EXPECT_EQ(dir->Readdir(&cookie, buffer, sizeof(buffer), &length), ZX_OK);
diff --git a/zircon/system/ulib/fs/vfs.cc b/zircon/system/ulib/fs/vfs.cc
index efe48e3..3f27fbf 100644
--- a/zircon/system/ulib/fs/vfs.cc
+++ b/zircon/system/ulib/fs/vfs.cc
@@ -396,7 +396,7 @@
   return ZX_OK;
 }
 
-zx_status_t Vfs::Readdir(Vnode* vn, vdircookie_t* cookie, void* dirents, size_t len,
+zx_status_t Vfs::Readdir(Vnode* vn, VdirCookie* cookie, void* dirents, size_t len,
                          size_t* out_actual) {
   fbl::AutoLock lock(&vfs_lock_);
   return vn->Readdir(cookie, dirents, len, out_actual);
diff --git a/zircon/system/ulib/fs/vnode.cc b/zircon/system/ulib/fs/vnode.cc
index 311823f..5315390 100644
--- a/zircon/system/ulib/fs/vnode.cc
+++ b/zircon/system/ulib/fs/vnode.cc
@@ -158,7 +158,7 @@
 
 zx_status_t Vnode::SetAttributes(VnodeAttributesUpdate a) { return ZX_ERR_NOT_SUPPORTED; }
 
-zx_status_t Vnode::Readdir(vdircookie_t* cookie, void* dirents, size_t len, size_t* out_actual) {
+zx_status_t Vnode::Readdir(VdirCookie* cookie, void* dirents, size_t len, size_t* out_actual) {
   return ZX_ERR_NOT_SUPPORTED;
 }
 
diff --git a/zircon/system/ulib/fs/watcher.cc b/zircon/system/ulib/fs/watcher.cc
index e4c876d..8481395 100644
--- a/zircon/system/ulib/fs/watcher.cc
+++ b/zircon/system/ulib/fs/watcher.cc
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include <fs/watcher.h>
-
 #include <stdlib.h>
 #include <string.h>
 #include <sys/stat.h>
@@ -11,6 +9,8 @@
 
 #include <memory>
 
+#include <fs/watcher.h>
+
 #ifdef __Fuchsia__
 #include <fuchsia/io/llcpp/fidl.h>
 #include <zircon/device/vfs.h>
@@ -35,8 +35,7 @@
 WatcherContainer::~WatcherContainer() = default;
 
 WatcherContainer::VnodeWatcher::VnodeWatcher(zx::channel h, uint32_t mask)
-    : h(std::move(h)),
-      mask(mask & ~(fio::WATCH_MASK_EXISTING | fio::WATCH_MASK_IDLE)) {}
+    : h(std::move(h)), mask(mask & ~(fio::WATCH_MASK_EXISTING | fio::WATCH_MASK_IDLE)) {}
 
 WatcherContainer::VnodeWatcher::~VnodeWatcher() {}
 
@@ -101,7 +100,7 @@
   }
 
   if (mask & fio::WATCH_MASK_EXISTING) {
-    vdircookie_t dircookie;
+    VdirCookie dircookie;
     memset(&dircookie, 0, sizeof(dircookie));
     char readdir_buf[FDIO_CHUNK_SIZE];
     WatchBuffer wb;
diff --git a/zircon/system/ulib/memfs/directory.cc b/zircon/system/ulib/memfs/directory.cc
index 981b642..baff603 100644
--- a/zircon/system/ulib/memfs/directory.cc
+++ b/zircon/system/ulib/memfs/directory.cc
@@ -119,8 +119,7 @@
   return ZX_OK;
 }
 
-zx_status_t VnodeDir::Readdir(fs::vdircookie_t* cookie, void* data, size_t len,
-                              size_t* out_actual) {
+zx_status_t VnodeDir::Readdir(fs::VdirCookie* cookie, void* data, size_t len, size_t* out_actual) {
   fs::DirentFiller df(data, len);
   if (!IsDirectory()) {
     // This WAS a directory, but it has been deleted.
diff --git a/zircon/system/ulib/memfs/dnode.cc b/zircon/system/ulib/memfs/dnode.cc
index e732349..70a05f4 100644
--- a/zircon/system/ulib/memfs/dnode.cc
+++ b/zircon/system/ulib/memfs/dnode.cc
@@ -118,7 +118,7 @@
   size_t order;  // Minimum 'order' of the next dnode dirent to be read.
 };
 
-static_assert(sizeof(dircookie_t) <= sizeof(fs::vdircookie_t),
+static_assert(sizeof(dircookie_t) <= sizeof(fs::VdirCookie),
               "MemFS dircookie too large to fit in IO state");
 
 // Read the canned "." and ".." entries that should
diff --git a/zircon/system/ulib/memfs/include/lib/memfs/cpp/vnode.h b/zircon/system/ulib/memfs/include/lib/memfs/cpp/vnode.h
index d9ec0e3..cbe93df 100644
--- a/zircon/system/ulib/memfs/include/lib/memfs/cpp/vnode.h
+++ b/zircon/system/ulib/memfs/include/lib/memfs/cpp/vnode.h
@@ -138,8 +138,7 @@
   void SetRemote(zx::channel remote) final;
 
  private:
-  zx_status_t Readdir(fs::vdircookie_t* cookie, void* dirents, size_t len,
-                      size_t* out_actual) final;
+  zx_status_t Readdir(fs::VdirCookie* cookie, void* dirents, size_t len, size_t* out_actual) final;
 
   // Resolves the question, "Can this directory create a child node with the name?"
   // Returns "ZX_OK" on success; otherwise explains failure with error message.
diff --git a/zircon/system/ulib/syslog/global.cc b/zircon/system/ulib/syslog/global.cc
index 44b23b1..1137cfb 100644
--- a/zircon/system/ulib/syslog/global.cc
+++ b/zircon/system/ulib/syslog/global.cc
@@ -38,9 +38,10 @@
 fx_logger_t* get_or_create_global_logger(bool connect) {
   // Upon initialization, the default logger is either provided with a
   // socket connection, or a fallback file-descriptor (which it will use)
-  // or it will be initialized to log to STDERR.
-  static std::unique_ptr<fx_logger_t> logger(MakeDefaultLogger(connect));
-  return logger.get();
+  // or it will be initialized to log to STDERR. This object is constructed on
+  // the first call to this function and will be leaked on shutdown.
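+  // Leaking it avoids destruction-order problems when other global objects
+  // log during process teardown (see LogDuringTeardownTest in syslog_tests.cc).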
+  static fx_logger_t* logger = MakeDefaultLogger(connect);
+  return logger;
 }
 
 SYSLOG_EXPORT
diff --git a/zircon/system/ulib/syslog/test/syslog_tests.cc b/zircon/system/ulib/syslog/test/syslog_tests.cc
index ff64f17..b60750d 100644
--- a/zircon/system/ulib/syslog/test/syslog_tests.cc
+++ b/zircon/system/ulib/syslog/test/syslog_tests.cc
@@ -44,6 +44,15 @@
 
 }  // namespace
 
+// Ensure accessing the global logger is safe when a global object is being torn down.
+class LogDuringTeardownTest {
+ public:
+  ~LogDuringTeardownTest() {
+    // This should not crash.
+    FX_LOG(INFO, NULL, "message");
+  }
+} g_log_during_teardown;
+
 TEST(SyslogTests, test_log_init_with_socket) {
   zx::socket socket0, socket1;
   EXPECT_EQ(ZX_OK, zx::socket::create(0, &socket0, &socket1));