[release] Snap to cafe43e483
Change-Id: Ibb745733fba7dc3e9e4822a2fd08b2b2f2fc9c88
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 0000000..7c56d5f
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,10 @@
+# Commits in this file will be ignored by git hyper-blame.
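+#
+# Plain git can be pointed at the same list; for example (assuming git 2.23
+# or newer):
+#   git config blame.ignoreRevsFile .git-blame-ignore-revs
+# or, per invocation:
+#   git blame --ignore-revs-file=.git-blame-ignore-revs <file>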
+
+# Reformat //zircon/kernel
+b45c83e0a3035f185d41e130956187826c30a086
+# Reformat most of the tree
+b1c2f508a59e6c87c617852ed3e424693a392646
+# Reformat //zircon/third_party/musl
+7c5e521391fddb98fd8f6970da7c410899ddf5cf
+# Reformat .c files in //zircon
+08ad01f521b1523b29fbacf281f3ce3317ef9063
diff --git a/.gitignore b/.gitignore
index 28f5913..2066118 100644
--- a/.gitignore
+++ b/.gitignore
@@ -105,3 +105,10 @@
 # vscode) that is aware of submodules. This generated .gitmodules file
 # shouldn't be tracked in source control.
 /.gitmodules
+
+# Running `fx check-licenses` will generate a NOTICE file at the root of the fuchsia repository. The
+# file represents the license state of the entire repository.
+# We automatically run this command during some user/userdebug builds, so it can be included in the
+# Fuchsia images.
+/NOTICE.html
+/NOTICE.html.gz
diff --git a/.gn b/.gn
index 44d343d..0687137 100644
--- a/.gn
+++ b/.gn
@@ -44,10 +44,10 @@
   "//build/config/clang/clang.gni",
   "//build/config/mac/mac_sdk.gni",
   "//build/config/sanitizers/BUILD.gn",
+  "//build/prebuilt/BUILD.gn",
   "//build/toolchain/concurrent_jobs.gni",
   "//build/toolchain/zircon/zircon_toolchain.gni",
-  "//build/unification/lib/zircon_runtime_library.gni",
   "//src/graphics/lib/compute/spinel-rs-sys/BUILD.gn",
-  "//topaz/runtime/sky_engine/BUILD.gn",
   "//topaz/tools/protobuf/protobuf_dart.gni",
+  "//zircon/kernel/arch/x86/phys/BUILD.gn",
 ]
diff --git a/BUILD.gn b/BUILD.gn
index e3f3aa8b..b4e79d2 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -13,6 +13,7 @@
 import("//build/toolchain/ccache.gni")
 import("//build/toolchain/goma.gni")
 import("//build/toolchain/zircon/clang.gni")
+import("//zircon/public/gn/config/experimental_cxx_version.gni")
 
 # TODO(fxbug.dev/60410): remove import when the ZN build is gone.
 import("//build/zbi/zbi.gni")
@@ -111,6 +112,7 @@
   # Any individual Zircon build argument set in `zircon_extra_args` will
   # silently clobber the default value shown here.
   zircon_args = {
+    experimental_cxx_version = experimental_cxx_version
     use_ccache = use_ccache
     use_goma = use_goma
     output_gsym = output_gsym
@@ -337,9 +339,80 @@
   contents = read_file("$root_build_dir/args.gn", "scope")
 }
 
-# See //zircon:binaries for a description and schema definition.
-# in file: "$root_build_dir/binaries.json"
+# This describes all the binaries linked by the build.
 #
+# This enumerates each linked binary (executable, shared library, or
+# loadable/"plug-in" module) used by the build, or produced by the build.
+#
+# This includes host tools, kernels, boot loaders, drivers, as well as
+# normal executables. This also includes prebuilt toolchain runtime
+# libraries that end up in image files. It does not yet include any
+# non-native binary formats.
+#
+# For non-prebuilts, this is meant to reach the entire dependency graph
+# of all binaries that the build would ever produce.  Not every binary
+# described is necessarily produced by any given Ninja run.  Either
+# the $debug or the $dist file for any individual binary can be passed to
+# Ninja as a specific target argument to ensure it's built and
+# up to date before making use of that binary.  Like all build_api_module()
+# targets, the top-level "binaries" target serves as a Ninja target to
+# request that every binary described be built.
+#
+# Note that in certain cases, the paths in `debug` and `dist` will
+# point out of the build tree, and thus cannot be used as Ninja targets.
+# This happens for prebuilts or binaries produced by the Zircon build.
+#
+# Type: list(scope)
+#
+#   cpu
+#     Required: CPU architecture the binary is for, e.g. "arm64" or "x64".
+#     Type: string
+#
+#   os
+#     Required: OS the binary is for, e.g. "fuchsia", "linux", or "mac".
+#     Type: string
+#
+#   environment
+#     Required: The ${toolchain.environment} name of what specific
+#     execution environment this was built for, e.g. "user", "host",
+#     "guest".  The tuple of ($cpu, $os, $environment) should indicate
+#     what hardware and software environment this binary is compatible
+#     with.
+#     Type: string
+#
+#   label
+#     Required: The GN label of the binary target.
+#     Type: label_with_toolchain
+#
+#   type
+#     Required: The type of binary.
+#     Type: "executable" or "shared_library" or "loadable_module"
+#
+#   debug
+#     Required: Path to where the unstripped or separate debug file is
+#     found, relative to $root_build_dir.  If $dist is omitted, this
+#     is also the file that is used at runtime.
+#     Type: path relative to $root_build_dir
+#
+#   dist
+#     Optional: Path to where the stripped binary for deployment/execution is
+#     found, relative to $root_build_dir.  This binary may be required for
+#     some debugging tasks if $debug is a separate debug file rather than
+#     an unstripped file.  It should exactly match the binary that will be
+#     seen on devices or run directly on hosts.
+#     Type: path relative to $root_build_dir
+#
+#   elf_build_id
+#     Optional: Path to a file containing the lowercase ASCII hexadecimal
+#     representation of the ELF build ID in this binary.  This is omitted
+#     for OS environments that don't use ELF.  For an ELF binary that
+#     doesn't have a build ID note, this key will be present but point to
+#     an empty file.
+#     Type: path relative to $root_build_dir
+#
+#   breakpad
+#     Optional: Path to the breakpad symbol file for the debug binary.  This
+#     will only be present if $output_breakpad_syms was set.
+#     Type: path relative to $root_build_dir
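+#
+# For illustration only (all values below are hypothetical), a single entry
+# in "$root_build_dir/binaries.json" might look like:
+#
+#   {
+#     "cpu": "x64",
+#     "os": "fuchsia",
+#     "environment": "user",
+#     "label": "//examples/hello_world/cpp:bin(//build/toolchain/fuchsia:x64)",
+#     "type": "executable",
+#     "debug": "exe.unstripped/hello_world_cpp",
+#     "dist": "hello_world_cpp",
+#     "elf_build_id": "gen/examples/hello_world/cpp/bin.build_id.txt"
+#   }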
 build_api_module("binaries") {
   testonly = true
   data_keys = [ "binaries" ]
@@ -458,8 +531,13 @@
   }
 }
 
-# See //zircon:generated_sources for a description and schema definition.
-# in file: "$root_build_dir/generated_sources.json"
+# This describes all the generated source files in the build.
+#
+# The intent is that telling Ninja to build all these individual files
+# will be the minimal work sufficient for source code analysis of all
+# the files described in the compilation database to be viable.
+#
+# Type: list(path relative to $root_build_dir)
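+#
+# For illustration only (the path below is hypothetical), entries are plain
+# strings such as "fidling/gen/sdk/fidl/fuchsia.io/fuchsia.io.fidl.h".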
 #
 build_api_module("generated_sources") {
   testonly = true
@@ -679,8 +757,41 @@
   ]
 }
 
-# See //zircon:tests for a description and schema definition.
-# in file: "$root_build_dir/tests.json"
+# Tests in the build.
+#
+#  Below, the "testing root" refers to $root_build_dir on host, and the full
+#  filesystem path from the root on device.
+#
+# Type: list(scope)
+#
+#   * name
+#     - Required: Name of test.
+#     - Type: string
+#
+#   * label
+#     - Required: GN label associated with the test
+#     - Type: label_with_toolchain
+#
+#   * path
+#     - Required: Path to the test's executable.
+#     - Type: path relative to the testing root.
+#
+#   * cpu, os
+#     - Required: $current_cpu and $current_os values, respectively, for
+#       which this test is intended.
+#     - Type: string
+#
+#   * disabled
+#     - Optional: a free-form string indicating a reason for the test being
+#       disabled.
+#     - Type: string
+#
+#   * runtime_deps
+#     - Optional: a JSON file containing a list of root_build_dir-relative
+#       paths defining ascribed runtime dependencies of the test. These
+#       dependencies are aggregated via the metadata graph of the associated
+#       test target under a data key of "test_runtime_deps".
+#     - Type: path relative to root_build_dir
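+#
+#  For illustration only (all values below are hypothetical), a single entry
+#  in "$root_build_dir/tests.json" might look like:
+#
+#    {
+#      "name": "example_host_test",
+#      "label": "//examples/example_host_test:example_host_test(//build/toolchain:host_x64)",
+#      "path": "host_x64/example_host_test",
+#      "cpu": "x64",
+#      "os": "linux",
+#      "runtime_deps": "gen/examples/example_host_test.deps.json"
+#    }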
 #
 build_api_module("tests") {
   testonly = true
@@ -746,11 +857,11 @@
     ":tool_paths.llvm-tools",
     ":tool_paths.metadata",
     "//build/images:packages",
+    "//src/storage/bin/fvm($host_toolchain)",
     "//tools/doc_checker($host_toolchain)",
     "//zircon/third_party/uapp/mkfs-msdosfs($host_toolchain)",
     "//zircon/tools/blobfs($host_toolchain)",
     "//zircon/tools/fidl:tools($host_toolchain)",
-    "//zircon/tools/fvm($host_toolchain)",
     "//zircon/tools/kazoo($host_toolchain)",
     "//zircon/tools/ktrace-dump($host_toolchain)",
     "//zircon/tools/lz4($host_toolchain)",
@@ -855,7 +966,56 @@
 # This describes all the "ZBI tests" the build can produce.
 # in file: "$root_build_dir/zbi_tests.json"
 #
-# See //zircon:zbi_tests for a description and a schema definition.
+# This tells the infra recipes how to run ZBI tests, defined with zbi_test(),
+# which is a class of tests that are 'run' by booting an associated ZBI and
+# listening on serial for a particular string indicating success.
+#
+# The schema produced here matches //:images with the addition of the
+# `success_string` key.
+#
+# Type: list(scope)
+#
+#   * cpu
+#     - Required: CPU architecture the image is for, e.g. "arm64" or "x64".
+#     - Type: string
+#
+#   * name
+#     - Required: The primary way that this ZBI is known to consumers.
+#       Note that the name need not be unique within the images list.
+#       The tuple of ($name, $cpu) should be unique.
+#     - Type: string
+#
+#   * label
+#     - Required: The GN label of the image target.
+#     - Type: label_with_toolchain
+#
+#   * path
+#     - Required: Path to where the file is found, relative to $root_build_dir.
+#       This is also the argument to Ninja that ensures this image will be built.
+#     - Type: path relative to $root_build_dir
+#
+#   * bootserver_netboot
+#     - Required: bootserver commandline option for booting the ZBI.
+#     - Type: string
+#
+#   * success_string
+#     - Required: The string that the ZBI test outputs to indicate success.
+#     - Type: string
+#
+#   * device_types
+#     - Required: The list of device types that this test should be run on.
+#       //build/testing/platforms.gni lists the allowed values; others will
+#       be ignored.
+#     - Type: list(string)
+#
+#   * qemu_kernel_label
+#     - Optional: Label appearing in $label of an //:images entry.
+#       That image should be used in place of the usual `qemu-kernel` image.
+#     - Type: label_with_toolchain
+#
+#   * timeout
+#     - Optional: Timeout for running the test, in seconds.
+#     - Type: seconds
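+#
+#   For illustration only (all values below are hypothetical), a single
+#   entry in "$root_build_dir/zbi_tests.json" might look like:
+#
+#     {
+#       "cpu": "x64",
+#       "name": "example-core-tests",
+#       "label": "//zircon/system/utest/core:example-core-tests(//build/toolchain/fuchsia:x64)",
+#       "path": "obj/zircon/system/utest/core/example-core-tests.zbi",
+#       "bootserver_netboot": "--boot",
+#       "success_string": "*** ALL TESTS PASSED ***",
+#       "device_types": [ "QEMU" ],
+#       "timeout": 600
+#     }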
 #
 build_api_module("zbi_tests") {
   testonly = true
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 59dae62..9813381 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,31 +1,34 @@
 # Contributing to Fuchsia
 
-We'd love to accept your patches and contributions to the Fuchsia project. There are
-just a few small guidelines you need to follow.
-
-For the detailed instructions on how to contribute changes,
-see the [Contribute changes](/docs/development/source_code/contribute_changes.md) page.
+Fuchsia lets anyone contribute to the project, regardless of their employer.
+The Fuchsia project reviews and encourages well-tested, high-quality
+contributions from anyone who wants to contribute to Fuchsia.
 
 ## Contributor License Agreement
 
 Contributions to this project must be accompanied by a Contributor License
-Agreement. You (or your employer) retain the copyright to your contribution;
-this simply gives us permission to use and redistribute your contributions as
-part of the project. Head over to <https://cla.developers.google.com/> to see
-your current agreements on file or to sign a new one.
+Agreement (CLA).
 
-You generally only need to submit a CLA once, so if you've already submitted one
-(even if it was for a different project), you probably don't need to do it
-again.
+To see any Contributor License Agreements on file or to sign a CLA, go to <https://cla.developers.google.com/>.
 
-## Code reviews
+For more information about the Google CLA, see [Contributor License Agreements](https://cla.developers.google.com/about).
 
-All submissions, including submissions by project members, require review. We
-use [Gerrit](https://fuchsia-review.googlesource.com/) for this purpose. Consult
-[Gerrit User Guide](https://gerrit-review.googlesource.com/Documentation/intro-user.html) for more
-information on using Gerrit.
+## Contributing changes and submitting code reviews
 
-## Community Guidelines
+All changes require review, including changes by project members.
 
-This project follows
-[Google's Open Source Community Guidelines](https://opensource.google/conduct/).
+For detailed instructions on how to contribute changes,
+see [Contribute changes](/docs/development/source_code/contribute_changes.md).
+
+## Community guidelines
+
+This project observes the following community guidelines:
+
+  * [Google's Open Source Community Guidelines](https://opensource.google/conduct/)
+
+  * [Fuchsia Code of Conduct](/docs/CODE_OF_CONDUCT.md)
+
+## Governance
+
+Review Fuchsia's [Governance](/docs/contribute/governance/governance.md)
+statement.
diff --git a/LICENSE b/LICENSE
index 6b55e08..f839b1d 100644
--- a/LICENSE
+++ b/LICENSE
@@ -10,9 +10,6 @@
 copyright notice, this list of conditions and the following disclaimer
 in the documentation and/or other materials provided with the
 distribution.
-   * Neither the name of Google LLC. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
 
 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
diff --git a/boards/chromebook-x64.gni b/boards/chromebook-x64.gni
index 64bb19a..85def4f 100644
--- a/boards/chromebook-x64.gni
+++ b/boards/chromebook-x64.gni
@@ -28,12 +28,12 @@
   "//src/devices/usb/drivers/xhci-rewrite:xhci",
   "//src/graphics/display/drivers/intel-i915",
   "//src/media/audio/bin/ihda",
-  "//src/media/audio/drivers/alc5514",
-  "//src/media/audio/drivers/alc5663",
+  "//src/media/audio/drivers/codecs/alc5514",
+  "//src/media/audio/drivers/codecs/alc5663",
+  "//src/media/audio/drivers/codecs/max98927",
   "//src/media/audio/drivers/intel-hda/codecs/hdmi:hdmi-audio-codec",
   "//src/media/audio/drivers/intel-hda/codecs/realtek:realtek-audio-codec",
   "//src/media/audio/drivers/intel-hda/controller:intel-hda",
-  "//src/media/audio/drivers/max98927",
   "//src/ui/input/drivers/i2c-hid",
   "//src/ui/input/drivers/pc-ps2",
 ]
diff --git a/boards/qemu-x64.gni b/boards/qemu-x64.gni
index 8ec8333..d240194 100644
--- a/boards/qemu-x64.gni
+++ b/boards/qemu-x64.gni
@@ -29,12 +29,12 @@
   "//src/graphics/display/drivers/simple:simple.intel",
   "//src/graphics/display/drivers/simple:simple.nv",
   "//src/graphics/display/drivers/simple:simple.vmware",
-  "//src/media/audio/drivers/alc5514",
-  "//src/media/audio/drivers/alc5663",
+  "//src/media/audio/drivers/codecs/alc5514",
+  "//src/media/audio/drivers/codecs/alc5663",
+  "//src/media/audio/drivers/codecs/max98927",
   "//src/media/audio/drivers/intel-hda/codecs/hdmi:hdmi-audio-codec",
   "//src/media/audio/drivers/intel-hda/codecs/realtek:realtek-audio-codec",
   "//src/media/audio/drivers/intel-hda/controller:intel-hda",
-  "//src/media/audio/drivers/max98927",
   "//src/ui/input/drivers/i2c-hid",
   "//src/ui/input/drivers/pc-ps2",
   "//zircon/third_party/dev/ethernet/e1000",
diff --git a/boards/vim2.gni b/boards/vim2.gni
index 025bb25..52f667f 100644
--- a/boards/vim2.gni
+++ b/boards/vim2.gni
@@ -17,6 +17,7 @@
   "//src/devices/clock/drivers/amlogic-clk",
   "//src/devices/gpio/drivers/aml-gxl-gpio",
   "//src/devices/i2c/drivers/aml-i2c",
+  "//src/devices/registers/drivers/registers",
   "//src/devices/rtc/drivers/nxp",
   "//src/devices/scpi/drivers/aml-scpi-s912:aml-scpi-s912.mailbox",
   "//src/devices/scpi/drivers/aml-scpi-s912:scpi",
diff --git a/boards/vim3.gni b/boards/vim3.gni
index e19961d..1b5320f 100644
--- a/boards/vim3.gni
+++ b/boards/vim3.gni
@@ -20,6 +20,8 @@
   "//src/devices/ml/drivers/aml-nna",
   "//src/devices/registers/drivers/registers",
   "//src/devices/rtc/drivers/nxp",
+  "//src/devices/usb/drivers/aml-usb-phy-v2",
+  "//src/devices/usb/drivers/dwc2",
 ]
 
 board_bootfs_labels += _common_bootfs_deps
diff --git a/boards/x64.gni b/boards/x64.gni
index e031e1d..6692802 100644
--- a/boards/x64.gni
+++ b/boards/x64.gni
@@ -33,12 +33,12 @@
   "//src/graphics/display/drivers/simple:simple.nv",
   "//src/graphics/display/drivers/simple:simple.vmware",
   "//src/media/audio/bin/ihda",
-  "//src/media/audio/drivers/alc5514",
-  "//src/media/audio/drivers/alc5663",
+  "//src/media/audio/drivers/codecs/alc5514",
+  "//src/media/audio/drivers/codecs/alc5663",
+  "//src/media/audio/drivers/codecs/max98927",
   "//src/media/audio/drivers/intel-hda/codecs/hdmi:hdmi-audio-codec",
   "//src/media/audio/drivers/intel-hda/codecs/realtek:realtek-audio-codec",
   "//src/media/audio/drivers/intel-hda/controller:intel-hda",
-  "//src/media/audio/drivers/max98927",
   "//src/ui/input/drivers/i2c-hid",
   "//src/ui/input/drivers/pc-ps2",
   "//zircon/third_party/dev/ethernet/e1000",
diff --git a/build/BUILD.gn b/build/BUILD.gn
index fd4cb8e..c89126b 100644
--- a/build/BUILD.gn
+++ b/build/BUILD.gn
@@ -13,6 +13,7 @@
     "sdk:tests",
     "tools:tests",
     "tracer:tests",
+    "zbi:tests",
   ]
 }
 
@@ -37,14 +38,39 @@
   #
   # Maintainers will accept changes to the allowlist below that support
   # refactors, such as moving a legacy target to a different directory.
-  visibility = [
+  driver_package_users = [ "//src/connectivity/ethernet/drivers/rndis-host" ]
+
+  shell_true_users = [
+    "//src/connectivity/management/network-manager-cli",
+    "//src/connectivity/network/dhcpd-cli",
+    "//src/connectivity/network/mdns/util:mdns-util",
+    "//src/connectivity/network/net-cli",
+    "//src/connectivity/network/netdump",
+    "//src/connectivity/network/netstack:tools",
+    "//src/connectivity/network/netstack/tests/manual:netstack-manual-tests",
+    "//src/connectivity/network/tools/nc",
+    "//src/connectivity/network/tools/sockscripter:package",
+    "//third_party/network-conformance/vendor/MNTCPApp:mntcpapp",
+    "//third_party/network-conformance/vendor/UDPStub:udpstub",
+  ]
+
+  non_default_checkout_users = [
+    # This git repo is only checked out when the vulkan-cts attribute is set.
+    "//third_party/vulkan-cts/fuchsia/*",
+    "//third_party/vulkan-cts/fuchsia/split-cts/*",
+  ]
+
+  visibility =
+      driver_package_users + shell_true_users + non_default_checkout_users
+
+  # To regenerate:
+  # fx gn refs $(fx get-build-dir) //build:deprecated_package | sed -E 's|([^:]*):.*|"\1\/*",|g' | sed 's|.*//vendor/.*|"//vendor/*",|' | sort | uniq
+  visibility += [
     "//build/images/*",
     "//build/images/guest/*",
     "//build/images/overnet/*",
     "//build/images/recovery/*",
     "//build/info/*",
-    "//examples/components/basic/integration_tests/*",
-    "//examples/components/routing_failed/*",
     "//examples/cowsay/*",
     "//examples/criterion_bench/*",
     "//examples/diagnostics/inspect/codelab/cpp/*",
@@ -54,11 +80,6 @@
     "//examples/diagnostics/inspect/rust-vmo/*",
     "//examples/diagnostics/inspect/rust/*",
     "//examples/dotmatrix_display/*",
-    "//examples/fidl/dart/client/*",
-    "//examples/fidl/dart/request_pipelining/client/*",
-    "//examples/fidl/dart/request_pipelining/server/*",
-    "//examples/fidl/dart/server/*",
-    "//examples/fidl/dart/test/*",
     "//examples/fidl/go/client/*",
     "//examples/fidl/go/request_pipelining/client/*",
     "//examples/fidl/go/request_pipelining/server/*",
@@ -97,7 +118,6 @@
     "//garnet/bin/sysmem_connector/*",
     "//garnet/bin/terminal/*",
     "//garnet/bin/test_runner/*",
-    "//garnet/bin/thermd/*",
     "//garnet/bin/time/*",
     "//garnet/bin/timezone/*",
     "//garnet/bin/timezone/test/*",
@@ -135,7 +155,6 @@
     "//garnet/examples/tcp/tcp_file_sender/*",
     "//garnet/go/src/benchmarking/*",
     "//garnet/go/src/device_settings/*",
-    "//garnet/go/src/far/*",
     "//garnet/go/src/go-tuf/*",
     "//garnet/go/src/inspect/*",
     "//garnet/go/src/merkle/*",
@@ -151,7 +170,6 @@
     "//garnet/lib/rust/webpki-roots-fuchsia/*",
     "//garnet/lib/trace_converters/*",
     "//garnet/public/lib/fostr/*",
-    "//garnet/public/rust/*",
     "//sdk/lib/fuzzing/cpp/*",
     "//sdk/lib/inspect/contrib/cpp/*",
     "//sdk/lib/inspect/contrib/cpp/tests/*",
@@ -202,21 +220,13 @@
     "//src/connectivity/bluetooth/hci/vendor/broadcom/*",
     "//src/connectivity/bluetooth/hci/vendor/intel/*",
     "//src/connectivity/bluetooth/hci/vendor/mediatek/*",
-    "//src/connectivity/bluetooth/lib/async-helpers/*",
-    "//src/connectivity/bluetooth/lib/bt-a2dp/*",
-    "//src/connectivity/bluetooth/lib/bt-avctp/*",
-    "//src/connectivity/bluetooth/lib/bt-avdtp/*",
-    "//src/connectivity/bluetooth/lib/bt-fidl-mocks/*",
     "//src/connectivity/bluetooth/lib/fuchsia-audio-codec/*",
     "//src/connectivity/bluetooth/lib/fuchsia-audio-device-output/*",
     "//src/connectivity/bluetooth/lib/fuchsia-bluetooth/*",
     "//src/connectivity/bluetooth/profiles/bt-a2dp-manager/*",
     "//src/connectivity/bluetooth/profiles/bt-a2dp-sink/*",
     "//src/connectivity/bluetooth/profiles/bt-a2dp-source/*",
-    "//src/connectivity/bluetooth/profiles/bt-avrcp-target/*",
-    "//src/connectivity/bluetooth/profiles/bt-avrcp/*",
     "//src/connectivity/bluetooth/profiles/bt-hog/*",
-    "//src/connectivity/bluetooth/profiles/bt-rfcomm/*",
     "//src/connectivity/bluetooth/tests/*",
     "//src/connectivity/bluetooth/tests/bt-profile-test-server/*",
     "//src/connectivity/bluetooth/tools/bt-avdtp-tool/*",
@@ -233,15 +243,6 @@
     "//src/connectivity/bluetooth/tools/bt-snoop-cli/*",
     "//src/connectivity/bluetooth/tools/bt-snoop/*",
     "//src/connectivity/bluetooth/tools/tests/bt-tool-cli-tests/*",
-    "//src/connectivity/ethernet/drivers/ethernet/*",
-    "//src/connectivity/ethernet/drivers/ethertap/*",
-    "//src/connectivity/ethernet/drivers/rndis-function/*",
-    "//src/connectivity/ethernet/drivers/rndis-host/*",
-    "//src/connectivity/ethernet/drivers/virtio/*",
-    "//src/connectivity/lib/internet-checksum/*",
-    "//src/connectivity/lib/net-declare/*",
-    "//src/connectivity/lib/net-types/*",
-    "//src/connectivity/lib/network-device/*",
     "//src/connectivity/location/emergency/*",
     "//src/connectivity/location/regulatory_region/*",
     "//src/connectivity/location/regulatory_region/tests/*",
@@ -253,37 +254,10 @@
     "//src/connectivity/lowpan/service/*",
     "//src/connectivity/lowpan/tests/*",
     "//src/connectivity/lowpan/tools/lowpanctl/*",
-    "//src/connectivity/management/network-manager-cli/*",
-    "//src/connectivity/management/reachability/*",
-    "//src/connectivity/management/tests/network_manager_integration_tests/*",
-    "//src/connectivity/network/dhcpd-cli/*",
-    "//src/connectivity/network/drivers/network-device/*",
-    "//src/connectivity/network/drivers/network-device/device/*",
-    "//src/connectivity/network/drivers/network-device/mac/*",
-    "//src/connectivity/network/lib/dns_server_watcher/*",
-    "//src/connectivity/network/mdns/util/*",
-    "//src/connectivity/network/net-cli/*",
-    "//src/connectivity/network/netdump/*",
-    "//src/connectivity/network/netstack/*",
-    "//src/connectivity/network/netstack/dhcp/*",
-    "//src/connectivity/network/netstack/dns/*",
-    "//src/connectivity/network/netstack/inspect/validator/*",
-    "//src/connectivity/network/netstack/routes/*",
-    "//src/connectivity/network/netstack/tests/manual/*",
-    "//src/connectivity/network/netstack3/tools/*",
-    "//src/connectivity/network/testing/netemul/runner/runner/*",
-    "//src/connectivity/network/tests/benchmarks/*",
-    "//src/connectivity/network/tools/nc/*",
-    "//src/connectivity/network/tools/network-speed-test/*",
-    "//src/connectivity/network/tools/network-speed-test/integration/*",
-    "//src/connectivity/network/tools/sockscripter/*",
     "//src/connectivity/openthread/drivers/ot-radio/*",
     "//src/connectivity/openthread/tests/fake-drivers/fake-ot-radio/*",
     "//src/connectivity/openthread/tests/test-components/*",
     "//src/connectivity/openthread/third_party/openthread/platform/tests/*",
-    "//src/connectivity/overnet/lib/core/*",
-    "//src/connectivity/overnet/lib/hoist/*",
-    "//src/connectivity/overnet/lib/serial_link/*",
     "//src/connectivity/overnet/tools/debug-serial/*",
     "//src/connectivity/overnet/tools/onet/*",
     "//src/connectivity/ppp/drivers/serial-ppp/*",
@@ -300,9 +274,7 @@
     "//src/connectivity/telephony/tests/fake-drivers/usb-qmi-function/*",
     "//src/connectivity/telephony/tools/ril-ctl/*",
     "//src/connectivity/telephony/tools/tel-snoop-cli/*",
-    "//src/connectivity/weave/adaptation/tests/*",
     "//src/connectivity/weave/weavestack/*",
-    "//src/connectivity/wlan/drivers/mediatek/ralink/*",
     "//src/connectivity/wlan/drivers/realtek/rtl88xx/*",
     "//src/connectivity/wlan/drivers/testing/lib/sim-device/test/*",
     "//src/connectivity/wlan/drivers/testing/lib/sim-env/test/*",
@@ -465,7 +437,6 @@
     "//src/diagnostics/archivist/tests/v2/*",
     "//src/diagnostics/lib/triage/wasm/*",
     "//src/diagnostics/sampler/*",
-    "//src/diagnostics/validator/logs/encoding/puppet/dart/*",
     "//src/diagnostics/wasm/example/*",
     "//src/experiences/session_shells/ermine/session/*",
     "//src/factory/factory_store_providers/*",
@@ -501,22 +472,13 @@
     "//src/graphics/drivers/msd-vsi-vip/tests/integration/*",
     "//src/graphics/drivers/msd-vsi-vip/tests/unit_tests/*",
     "//src/graphics/drivers/virtio/*",
-    "//src/graphics/examples/vkprimer/*",
-    "//src/graphics/examples/vkprimer/cmd-buf-benchmark/*",
-    "//src/graphics/examples/vkprimer/transaction-elimination-test/*",
+    "//src/graphics/examples/vkproto/*",
+    "//src/graphics/examples/vkproto/transaction-elimination-test/*",
     "//src/graphics/examples/vulkaninfo/*",
-    "//src/graphics/lib/compute/*",
-    "//src/graphics/lib/compute/examples/*",
-    "//src/graphics/lib/compute/hotsort/platforms/vk/tests/hotsort_vk_bench/*",
     "//src/graphics/lib/compute/mold/*",
     "//src/graphics/lib/compute/spinel-rs-sys/*",
     "//src/graphics/lib/compute/spinel-rs/*",
-    "//src/graphics/lib/compute/spinel/platforms/vk/tests/spinel_vk_svg/*",
     "//src/graphics/lib/compute/surpass/*",
-    "//src/graphics/lib/compute/tests/vk-app-state-test/*",
-    "//src/graphics/lib/compute/tests/vk-swapchain-test/*",
-    "//src/graphics/lib/compute/tests/vk-transfer-test/*",
-    "//src/graphics/lib/compute/tests/vk-triangle-test/*",
     "//src/graphics/lib/goldfish-vulkan/gnbuild/*",
     "//src/graphics/lib/magma/*",
     "//src/graphics/lib/magma/gnbuild/magma-arm-mali/*",
@@ -594,25 +556,15 @@
     "//src/lib/intl/lookup/cpp/*",
     "//src/lib/intl/lookup/rust/*",
     "//src/lib/intl/unicode_utils/char_collection/*",
-    "//src/lib/isolated_devmgr/v2_component/*",
     "//src/lib/json_parser/*",
     "//src/lib/line_input/*",
     "//src/lib/listnode/*",
     "//src/lib/mapped-vmo/*",
     "//src/lib/mpmc/*",
     "//src/lib/mundane/*",
-    "//src/lib/network/ethernet/*",
-    "//src/lib/network/fidl_fuchsia_hardware_ethernet_ext/*",
-    "//src/lib/network/fidl_fuchsia_net_ext/*",
-    "//src/lib/network/fidl_fuchsia_net_name_ext/*",
-    "//src/lib/network/fidl_fuchsia_net_stack_ext/*",
-    "//src/lib/network/fidl_fuchsia_netstack_ext/*",
-    "//src/lib/network/packet-formats/*",
-    "//src/lib/network/packet/*",
     "//src/lib/pkg_url/*",
     "//src/lib/scoped_task/*",
     "//src/lib/shared-buffer/*",
-    "//src/lib/storage/*",
     "//src/lib/syslog/go/*",
     "//src/lib/syslog/rust/*",
     "//src/lib/syslog/rust/tests/*",
@@ -642,14 +594,11 @@
     "//src/media/audio/audio_core/mixer/*",
     "//src/media/audio/audio_core/mixer/test/*",
     "//src/media/audio/audio_core/test/api/*",
-    "//src/media/audio/audio_core/test/hardware/*",
-    "//src/media/audio/drivers/alc5663/tests/*",
-    "//src/media/audio/drivers/astro-pdm-input/test/*",
+    "//src/media/audio/drivers/codecs/alc5663/tests/*",
     "//src/media/audio/drivers/codecs/max98373/*",
     "//src/media/audio/drivers/codecs/tas5782/*",
     "//src/media/audio/drivers/codecs/tas58xx/*",
     "//src/media/audio/drivers/intel-hda/controller/*",
-    "//src/media/audio/drivers/sherlock-pdm-input/test/*",
     "//src/media/audio/drivers/test/*",
     "//src/media/audio/drivers/virtual_audio/*",
     "//src/media/audio/examples/effects/*",
@@ -706,6 +655,7 @@
     "//src/power/power-manager/rkf45/*",
     "//src/power/shutdown-shim/tests/*",
     "//src/power/temperature-logger/*",
+    "//src/power/thermd/*",
     "//src/recovery/factory_reset/*",
     "//src/recovery/integration/*",
     "//src/recovery/system/*",
@@ -731,53 +681,14 @@
     "//src/speech/tts/*",
     "//src/storage/bin/disk-pave/*",
     "//src/storage/blobfs-corrupt/*",
-    "//src/storage/blobfs-stress-test/*",
     "//src/storage/ext4/lib/*",
     "//src/storage/ext4/read-only/*",
     "//src/storage/ext4/server/*",
     "//src/storage/extractor/*",
     "//src/storage/factory/export_ffs/*",
     "//src/storage/volume_image/*",
-    "//src/sys/appmgr/*",
-    "//src/sys/appmgr/integration_tests/*",
-    "//src/sys/appmgr/integration_tests/component_events/*",
-    "//src/sys/appmgr/integration_tests/components/*",
-    "//src/sys/appmgr/integration_tests/inspect/*",
-    "//src/sys/appmgr/integration_tests/lifecycle/*",
-    "//src/sys/appmgr/integration_tests/logs/*",
-    "//src/sys/appmgr/integration_tests/mock_runner/*",
-    "//src/sys/appmgr/integration_tests/outdir/*",
-    "//src/sys/appmgr/integration_tests/policy/*",
-    "//src/sys/appmgr/integration_tests/sandbox/*",
-    "//src/sys/appmgr/integration_tests/sandbox/features/ambient-executable-policy/*",
-    "//src/sys/appmgr/integration_tests/sandbox/features/build-info/*",
-    "//src/sys/appmgr/integration_tests/sandbox/features/durable-data/*",
-    "//src/sys/appmgr/integration_tests/sandbox/features/factory-data/*",
-    "//src/sys/appmgr/integration_tests/sandbox/features/hub/*",
-    "//src/sys/appmgr/integration_tests/sandbox/features/isolated-temp/*",
-    "//src/sys/appmgr/integration_tests/sandbox/features/misc-storage/*",
-    "//src/sys/appmgr/integration_tests/sandbox/features/shell-commands/*",
-    "//src/sys/appmgr/integration_tests/sandbox/features/shell/*",
-    "//src/sys/appmgr/integration_tests/sandbox/features/storage/*",
-    "//src/sys/appmgr/integration_tests/sandbox/features/system-temp/*",
-    "//src/sys/appmgr/integration_tests/sandbox/services/*",
-    "//src/sys/appmgr/integration_tests/util/*",
+    "//src/sys/appmgr:system_data_file",
     "//src/sys/component_index/*",
-    "//src/sys/component_manager/src/elf_runner/tests/*",
-    "//src/sys/component_manager/tests/base_resolver_test/*",
-    "//src/sys/component_manager/tests/collections/*",
-    "//src/sys/component_manager/tests/destruction/*",
-    "//src/sys/component_manager/tests/elf_runner/*",
-    "//src/sys/component_manager/tests/events/*",
-    "//src/sys/component_manager/tests/hub/*",
-    "//src/sys/component_manager/tests/panic-test/*",
-    "//src/sys/component_manager/tests/rights/*",
-    "//src/sys/component_manager/tests/routing/*",
-    "//src/sys/component_manager/tests/security_policy/ambient_mark_vmo_exec/*",
-    "//src/sys/component_manager/tests/security_policy/main_process_critical/*",
-    "//src/sys/component_manager/tests/shutdown/*",
-    "//src/sys/component_manager/tests/storage/*",
-    "//src/sys/component_manager/tests/work_scheduler/*",
     "//src/sys/component_manager_for_test/*",
     "//src/sys/component_manager_for_test/tests/*",
     "//src/sys/component_test_runner/*",
@@ -786,18 +697,7 @@
     "//src/sys/gtest_v1_runner/*",
     "//src/sys/gtest_v1_runner/tests/*",
     "//src/sys/installer/*",
-    "//src/sys/lib/cm_fidl_validator/*",
-    "//src/sys/lib/cm_json/*",
-    "//src/sys/lib/cm_rust/*",
-    "//src/sys/lib/cm_types/*",
-    "//src/sys/lib/component_id_index/*",
-    "//src/sys/lib/fidl-connector/*",
-    "//src/sys/lib/fidl-fuchsia-pkg-ext/*",
-    "//src/sys/lib/fidl-fuchsia-pkg-rewrite-ext/*",
-    "//src/sys/lib/fuchsia-bootfs/*",
-    "//src/sys/lib/fuchsia_backoff/*",
     "//src/sys/lib/library_loader/*",
-    "//src/sys/lib/runner/*",
     "//src/sys/locate/*",
     "//src/sys/pkg/bin/amber/*",
     "//src/sys/pkg/bin/fake-channel-control/*",
@@ -819,7 +719,6 @@
     "//src/sys/pkg/lib/fidl-fuchsia-update-ext/*",
     "//src/sys/pkg/lib/fidl-fuchsia-update-installer-ext/*",
     "//src/sys/pkg/lib/forced-fdr/*",
-    "//src/sys/pkg/lib/fuchsia-archive/*",
     "//src/sys/pkg/lib/fuchsia-pkg-testing/*",
     "//src/sys/pkg/lib/isolated-swd/*",
     "//src/sys/pkg/lib/omaha-client/*",
@@ -876,7 +775,6 @@
     "//src/tests/benchmarks/fidl/rust/*",
     "//src/tests/benchmarks/fidl/walker/*",
     "//src/tests/fidl/fidl_go_conformance/*",
-    "//src/tests/intl/timestamp-server-dart/*",
     "//src/tests/intl/timezone/*",
     "//src/tests/microbenchmarks/*",
     "//src/ui/a11y/bin/a11y_manager/*",
@@ -919,12 +817,12 @@
     "//src/ui/scenic/lib/utils/*",
     "//src/ui/testing/text/*",
     "//src/ui/testing/views/*",
-    "//src/ui/tests/e2e_flutter_tests/embedder/*",
-    "//src/ui/tests/e2e_input_tests/keyboard/*",
-    "//src/ui/tests/e2e_input_tests/scenic/*",
-    "//src/ui/tests/e2e_input_tests/shortcut/*",
-    "//src/ui/tests/e2e_input_tests/touch/*",
-    "//src/ui/tests/e2e_input_tests/touch/cpp-gfx-client/*",
+    "//src/ui/tests/integration_flutter_tests/embedder/*",
+    "//src/ui/tests/integration_input_tests/keyboard/*",
+    "//src/ui/tests/integration_input_tests/scenic/*",
+    "//src/ui/tests/integration_input_tests/shortcut/*",
+    "//src/ui/tests/integration_input_tests/touch/*",
+    "//src/ui/tests/integration_input_tests/touch/cpp-gfx-client/*",
     "//src/ui/tests/scenic-stress-tests/*",
     "//src/ui/tools/*",
     "//src/ui/tools/present_view/tests/*",
@@ -934,20 +832,16 @@
     "//src/ui/tools/tiles_ctl/*",
     "//src/virtualization/bin/guest/*",
     "//src/virtualization/bin/guest_manager/*",
-    "//src/virtualization/bin/guest_runner/*",
     "//src/virtualization/bin/vmm/*",
     "//src/virtualization/bin/vmm/device/*",
     "//src/virtualization/lib/grpc/*",
     "//src/virtualization/lib/guest_interaction/*",
     "//src/virtualization/lib/vsh/*",
-    "//src/virtualization/packages/biscotti_guest/*",
     "//src/virtualization/packages/biscotti_guest/linux_runner/*",
     "//src/virtualization/packages/debian_guest/*",
-    "//src/virtualization/packages/linux_guest/*",
     "//src/virtualization/packages/termina_guest/*",
     "//src/virtualization/packages/zircon_guest/*",
     "//src/virtualization/tests/*",
-    "//src/zircon/tests/kernel-clocks/*",
     "//third_party/Vulkan-Tools/cube/*",
     "//third_party/boringssl/*",
     "//third_party/cobalt/*",
@@ -956,42 +850,18 @@
     "//third_party/iperf/*",
     "//third_party/libc-tests/*",
     "//third_party/micro-ecc/*",
-    "//third_party/network-conformance/*",
     "//third_party/openssh-portable/*",
     "//third_party/openssl-ecjpake/*",
     "//third_party/quickjs/*",
     "//third_party/sbase/*",
-    "//third_party/vim/*",
-    "//third_party/vulkan-cts/fuchsia/*",
-    "//third_party/vulkan-cts/fuchsia/split-cts/*",
     "//tools/blackout/blackout-target/*",
-    "//tools/blackout/blobfs-fsck-soft/*",
     "//tools/blackout/minfs-fsck/*",
     "//tools/cmc/*",
     "//tools/create/goldens/my-driver-cpp/*",
     "//tools/fidl/measure-tape/*",
     "//tools/fidlcat/tests/*",
-    "//topaz/public/dart/fuchsia_inspect/codelab/client/*",
-    "//topaz/public/dart/fuchsia_inspect/codelab/fizzbuzz/*",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_1/*",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_2/*",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_3/*",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_4/*",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_5/*",
-    "//topaz/public/dart/fuchsia_inspect/examples/inspect_mod/*",
-    "//topaz/public/dart/fuchsia_inspect/test/inspect_flutter_integration_tester/*",
     "//topaz/public/dart/fuchsia_inspect/test/integration/*",
     "//topaz/public/dart/fuchsia_inspect/test/validator_puppet/*",
-    "//topaz/public/dart/fuchsia_inspect_flutter/examples/torus15/*",
-    "//topaz/public/dart/fuchsia_logger/test/integration/*",
-    "//topaz/public/dart/fuchsia_modular/*",
-    "//topaz/public/dart/fuchsia_modular/examples/fibonacci_agent/*",
-    "//topaz/public/dart/fuchsia_modular/examples/slider_mod/*",
-    "//topaz/public/dart/fuchsia_modular_testing/*",
-    "//topaz/public/dart/fuchsia_services/*",
-    "//topaz/public/dart/fuchsia_services/test_support/*",
-    "//topaz/public/dart/fuchsia_vfs/*",
-    "//topaz/tests/dart-inspect-benchmarks/*",
     "//vendor/*",
     "//zircon/kernel/lib/boot-options/tests/*",
     "//zircon/kernel/lib/devicetree/tests/*",
@@ -1007,689 +877,3 @@
     "//zircon/system/utest/stdio/*",
   ]
 }
-
-group("deprecated_package_underscore") {
-  #  ________  _________  ________  ________
-  # |\   ____\|\___   ___\\   __  \|\   __  \
-  # \ \  \___|\|___ \  \_\ \  \|\  \ \  \|\  \
-  #  \ \_____  \   \ \  \ \ \  \\\  \ \   ____\
-  #   \|____|\  \   \ \  \ \ \  \\\  \ \  \___|
-  #     ____\_\  \   \ \__\ \ \_______\ \__\
-  #    |\_________\   \|__|  \|_______|\|__|
-  #    \|_________|
-  # This is an allowlist of packages that have underscores in their names.
-  # These packages are in violation of the fuchsia-pkg URL specification
-  # https://fuchsia.dev/fuchsia-src/concepts/packages/package_url#package_identity
-  # fxbug.dev/22679
-
-  # TODO(fxbug.dev/22679): Provide a mechanism for vendor to temporarily contribute to this list.
-  visibility = [ "//vendor/*" ]
-
-  # TODO(fxbug.dev/48237): replace system_image with system-image, see bug for details.
-  visibility += [ "//build/images/*" ]
-
-  # Do not add new entries to this list.
-  visibility += [
-    "//examples/criterion_bench:criterion_bench",
-    "//examples/diagnostics/inspect/codelab/cpp:inspect_cpp_codelab",
-    "//examples/diagnostics/inspect/codelab/cpp:inspect_cpp_codelab_integration_tests",
-    "//examples/diagnostics/inspect/codelab/cpp:inspect_cpp_codelab_unittests",
-    "//examples/diagnostics/inspect/codelab/rust:inspect_rust_codelab",
-    "//examples/diagnostics/inspect/codelab/rust:inspect_rust_codelab_integration_tests",
-    "//examples/diagnostics/inspect/codelab/rust:inspect_rust_codelab_unittests",
-    "//examples/diagnostics/inspect/cpp:example_server",
-    "//examples/diagnostics/inspect/cpp:example_server_unittests",
-    "//examples/diagnostics/inspect/rust:inspect_example_rs_tests",
-    "//examples/diagnostics/inspect/rust-vmo:inspect_vmo_example",
-    "//examples/diagnostics/inspect/rust-vmo:inspect_vmo_example_tests",
-    "//examples/dotmatrix_display:dotmatrix_display",
-    "//examples/fidl/echo_client/cpp:echo_client",
-    "//examples/fidl/echo_server/cpp:echo_server",
-    "//examples/fidl/echo_test/cpp:echo_test",
-    "//examples/fuzzers/fidl:echo_fuzzers_pkg",
-    "//examples/hello_world/cpp:hello_world_cpp",
-    "//examples/hello_world/cpp:hello_world_cpp_tests",
-    "//examples/hello_world/rust:hello_world_rust",
-    "//examples/hello_world/rust:hello_world_rust_tests",
-    "//examples/intl/tz_version_parrot:tz_version_parrot",
-    "//examples/intl/tz_version_parrot/test:tz_version_parrot_test",
-    "//examples/isolated_cache:isolated_cache_example",
-    "//examples/isolated_cache:isolated_cache_example_tests",
-    "//examples/rust/rust_static_linking:static_linking_tests",
-    "//garnet/bin/catapult_converter:catapult_converter",
-    "//garnet/bin/catapult_converter:catapult_converter_tests",
-    "//garnet/bin/cpuperf/tests:cpuperf_tests",
-    "//garnet/bin/cpuperf_provider:cpuperf_provider",
-    "//garnet/bin/cpuperf_provider:cpuperf_provider_tests",
-    "//garnet/bin/insntrace/tests:insntrace_tests",
-    "//garnet/bin/ktrace_provider:ktrace_provider",
-    "//garnet/bin/ktrace_provider:ktrace_provider_tests",
-    "//garnet/bin/log_listener:log_listener",
-    "//garnet/bin/log_listener:log_listener_shell",
-    "//garnet/bin/log_listener:log_listener_tests",
-    "//garnet/bin/metric_broker:metric_broker",
-    "//garnet/bin/metric_broker/config/cobalt/test:metric_broker_config_cobalt_tests",
-    "//garnet/bin/metric_broker/config/inspect/test:metric_broker_config_inspect_tests",
-    "//garnet/bin/metric_broker/config/test:metric_broker_config_tests",
-    "//garnet/bin/network_settings_server:network_settings_server",
-    "//garnet/bin/network_time/test:system_time_updater_test",
-    "//garnet/bin/odu:odu_tests",
-    "//garnet/bin/run_test_component:run-test-component-pkg",
-    "//garnet/bin/run_test_component:run_test_component_unittests",
-    "//garnet/bin/run_test_component/test:run_test_component_test",
-    "//garnet/bin/sched:sched_tests",
-    "//garnet/bin/setui:setui_service",
-    "//garnet/bin/setui:setui_service_tests",
-    "//garnet/bin/setui_client:setui_client",
-    "//garnet/bin/setui_client:setui_client_interface_test",
-    "//garnet/bin/setui_client:setui_client_tests",
-    "//garnet/bin/sysmem_connector:sysmem_connector",
-    "//garnet/bin/sysmem_connector:sysmem_connector_test",
-    "//garnet/bin/terminal:term_model_tests",
-    "//garnet/bin/terminal:terminal_tests",
-    "//garnet/bin/test_runner:run_integration_tests",
-    "//garnet/bin/test_runner:run_test",
-    "//garnet/bin/test_runner:test_runner_tests",
-    "//garnet/bin/thermd:thermd_tests_package",
-    "//garnet/bin/timezone/test:timezone_tests",
-    "//garnet/bin/trace/tests:trace_tests",
-    "//garnet/bin/trace_manager:trace_manager",
-    "//garnet/bin/trace_manager/tests:trace_manager_tests",
-    "//garnet/bin/trace_stress:trace_stress",
-    "//garnet/bin/ui/benchmarks/image_grid_cpp:image_grid_cpp",
-    "//garnet/bin/ui/benchmarks/transparency_benchmark:transparency_benchmark",
-    "//garnet/bin/vsock_service:vsock_service",
-    "//garnet/bin/vsock_service:vsock_service-tests",
-    "//garnet/bin/wayland:wayland_tests",
-    "//garnet/examples/components:component_hello_world",
-    "//garnet/examples/components:component_hello_world_tests",
-    "//garnet/examples/fidl/echo_client_cpp:echo_client_cpp",
-    "//garnet/examples/fidl/echo_client_cpp_synchronous:echo_client_cpp_synchronous",
-    "//garnet/examples/fidl/echo_client_go:echo_client_go",
-    "//garnet/examples/fidl/echo_client_llcpp:echo_client_llcpp",
-    "//garnet/examples/fidl/echo_client_rust:echo_client_rust",
-    "//garnet/examples/fidl/echo_client_rust_synchronous:echo_client_rust_synchronous",
-    "//garnet/examples/fidl/echo_server_c:echo_server_c",
-    "//garnet/examples/fidl/echo_server_cpp:echo_server_cpp",
-    "//garnet/examples/fidl/echo_server_cpp:echo_server_cpp_tests",
-    "//garnet/examples/fidl/echo_server_go:echo_server_go",
-    "//garnet/examples/fidl/echo_server_llcpp:echo_server_llcpp",
-    "//garnet/examples/http/httpget_cpp:httpget_cpp",
-    "//garnet/examples/http/httpget_rust:httpget_rust",
-    "//garnet/examples/intl/manager:intl_property_manager",
-    "//garnet/examples/intl/manager:intl_property_manager_tests",
-    "//garnet/examples/intl/wisdom/cpp:intl_wisdom",
-    "//garnet/examples/intl/wisdom/rust:intl_wisdom_rust",
-    "//garnet/examples/intl/wisdom/rust/client:intl_wisdom_client_rust_tests",
-    "//garnet/examples/intl/wisdom/rust/server:intl_wisdom_server_rust_tests",
-    "//garnet/examples/intl/wisdom/tests:intl_wisdom_test",
-    "//garnet/examples/profiler:profiler_example",
-    "//garnet/examples/tcp/tcp_file_sender:tcp_file_sender",
-    "//garnet/go/src/benchmarking:go_benchmarking_tests",
-    "//garnet/go/src/device_settings:device_settings_gotests",
-    "//garnet/go/src/far:go_far_tests",
-    "//garnet/go/src/go-tuf:go_tuf_tests",
-    "//garnet/go/src/inspect:go_inspect_tests",
-    "//garnet/go/src/merkle:go_merkle_tests",
-    "//garnet/go/src/thinfs:go_thinfs_tests",
-    "//garnet/lib/debugger_utils:debugger_utils_tests",
-    "//garnet/lib/perfmon:perfmon_tests",
-    "//garnet/lib/process:process_unittests",
-    "//garnet/lib/profiler/tests:profiler_tests",
-    "//garnet/lib/rust/files_async:files_async_tests",
-    "//garnet/lib/rust/io_util:io_util_tests",
-    "//garnet/lib/rust/netfilter:netfilter_tests",
-    "//garnet/lib/rust/webpki-roots-fuchsia:webpki-roots-fuchsia_tests",
-    "//garnet/lib/trace_converters:trace_converter_tests",
-    "//garnet/public/lib/fostr:tests_package",
-    "//garnet/tests/benchmarks:garnet_benchmarks",
-    "//garnet/tests/zircon:zircon_fuzzers_pkg",
-    "//sdk/lib/fuzzing/cpp:fuzzing_traits_tests",
-    "//sdk/lib/inspect/contrib/cpp:inspect_contrib_cpp_tests",
-    "//sdk/lib/inspect/contrib/cpp/tests:archive_reader_integration_tests",
-    "//sdk/lib/inspect/service/cpp/tests:inspect_service_cpp_tests",
-    "//sdk/lib/media/cpp:media_lib_timeline_tests",
-    "//sdk/lib/modular/cpp:modular_cpp_tests",
-    "//sdk/lib/modular/testing/cpp:modular_testing_cpp_tests",
-    "//sdk/lib/svc:svc_tests",
-    "//sdk/lib/sys/cpp/testing/environment_delegating_runner:environment_delegating_runner",
-    "//sdk/lib/sys/cpp/testing/examples:component_context_provider_example_test",
-    "//sdk/lib/sys/cpp/testing/examples/test_with_environment:test_with_environment_example_test",
-    "//sdk/lib/sys/cpp/tests:component_cpp_tests",
-    "//sdk/lib/sys/inspect/cpp/tests:sys_inspect_cpp_tests",
-    "//sdk/lib/sys/service/cpp:sys_service_tests",
-    "//sdk/lib/ui/scenic/cpp/tests:scenic_cpp_tests",
-    "//sdk/lib/vfs/cpp:vfs_cpp_tests",
-    "//sdk/lib/virtualization:virtualization_lib_tests",
-    "//src/camera:camera_tests",
-    "//src/camera/bin/device:camera_device",
-    "//src/camera/bin/device_watcher:camera_device_watcher",
-    "//src/camera/drivers/usb_video:usb_video",
-    "//src/camera/drivers/virtual_camera:virtual_camera",
-    "//src/camera/examples/camera_display:camera_display",
-    "//src/chromium/web_runner_tests:web_runner_tests",
-    "//src/cobalt/bin:cobalt_fuzzers_pkg",
-    "//src/cobalt/bin:cobalt_tests",
-    "//src/cobalt/bin:cobalt_tests_do_not_run_manually",
-    "//src/cobalt/bin/system-metrics:cobalt_system_metrics",
-    "//src/cobalt/bin/testing/mock_cobalt:mock_cobalt",
-    "//src/connectivity/bluetooth/core/bt-host:bluetooth_fuzzers_pkg",
-    "//src/connectivity/location/regulatory_region:regulatory_region",
-    "//src/connectivity/location/regulatory_region:regulatory_region_tests",
-    "//src/connectivity/location/regulatory_region/tests:regulatory_region_integration_test",
-    "//src/connectivity/lowpan/drivers/lowpan_spinel_driver:lowpan_spinel_driver",
-    "//src/connectivity/lowpan/drivers/spinel_spi_driver:spinel_spi_driver",
-    "//src/connectivity/network/netstack:netstack_gotests",
-    "//src/connectivity/network/netstack/dhcp:netstack_dhcp_gotests",
-    "//src/connectivity/network/netstack/dns:netstack_dns_gotests",
-    "//src/connectivity/network/netstack/routes:netstack_routes_gotests",
-    "//src/connectivity/network/testing/netemul/lib:netemul_lib_test",
-    "//src/connectivity/network/testing/netemul/runner:netemul_devmgr",
-    "//src/connectivity/network/testing/netemul/runner:netemul_sandbox",
-    "//src/connectivity/network/testing/netemul/runner/runner:netemul_runner",
-    "//src/connectivity/network/tests/benchmarks:netstack_benchmarks",
-    "//src/connectivity/network/tools/network-speed-test/integration:network_speed_test_test",
-    "//src/connectivity/network/tools/sockscripter:sockscripter_unittest",
-    "//src/connectivity/openthread/tests/ot-devmgr:ot_devmgr_component_integration",
-    "//src/connectivity/telephony/lib/tel-devmgr:tel_devmgr",
-    "//src/connectivity/telephony/lib/tel-devmgr:tel_devmgr_at_component_test",
-    "//src/connectivity/telephony/lib/tel-devmgr:tel_devmgr_qmi_component_test",
-    "//src/connectivity/telephony/telephony:telephony_tests",
-    "//src/connectivity/weave/adaptation/tests:adaptation_tests",
-    "//src/connectivity/wlan/drivers/testing/lib/sim-device/test:sim_device_test",
-    "//src/connectivity/wlan/drivers/third_party/atheros/ath10k:ath10k_test",
-    "//src/connectivity/wlan/drivers/third_party/intel/iwlwifi/test:iwlwifi_test",
-    "//src/connectivity/wlan/drivers/wlan:wlan_drivers_tests",
-    "//src/connectivity/wlan/drivers/wlanif/test:wlanif_unittest",
-    "//src/connectivity/wlan/lib/mlme/cpp/tests:wlan_tests",
-    "//src/connectivity/wlan/testing/wlan-devmgr:wlan_devmgr",
-    "//src/developer/debug/debug_agent:debug_agent",
-    "//src/developer/debug/debug_agent:debug_agent_tests",
-    "//src/developer/kcounter_inspect:kcounter_inspect",
-    "//src/developer/kcounter_inspect/tests:kcounter_inspect_tests",
-    "//src/developer/memory/metrics/tests:memory_metrics_tests",
-    "//src/developer/memory/monitor:monitor",
-    "//src/developer/memory/monitor/tests:memory_monitor_inspect_integration_tests",
-    "//src/developer/memory/monitor/tests:memory_monitor_tests",
-    "//src/developer/shell:shell_tests",
-    "//src/developer/shell/interpreter:shell_interpreter_tests",
-    "//src/developer/shell/interpreter:shell_server",
-    "//src/developer/system_monitor/bin/harvester:system_monitor_harvester",
-    "//src/developer/system_monitor/bin/harvester:system_monitor_harvester_tests",
-    "//src/developer/tracing/lib/test_utils:tracing_test_utils_tests",
-    "//src/devices/bin/driver_host:driver_host_tests",
-    "//src/devices/bin/driver_manager:driver_manager_tests",
-    "//src/devices/bind/debugger:bind_debugger",
-    "//src/devices/bind/debugger:bind_debugger_tests",
-    "//src/devices/clock/lib/clocktree/tests:clocktree_test",
-    "//src/experiences/benchmarks/bin/button_flutter:button_flutter_pkg",
-    "//src/experiences/session_shells/ermine/login_shell:userpicker_base_shell_pkg",
-    "//src/experiences/session_shells/ermine/session:workstation_session",
-    "//src/experiences/session_shells/ermine/session:workstation_session_tests",
-    "//src/experiences/settings/bluetooth:bluetooth_settings_pkg",
-    "//src/experiences/settings/datetime:datetime_settings_pkg",
-    "//src/experiences/settings/wifi:wifi_settings_pkg",
-    "//src/factory/factory_store_providers:factory_store_providers",
-    "//src/factory/factory_store_providers:factory_store_providers_ext4_tests",
-    "//src/factory/factory_store_providers:factory_store_providers_tests",
-    "//src/factory/factoryctl:factoryctl_tests",
-    "//src/factory/fake_factory_items:fake_factory_items",
-    "//src/factory/fake_factory_items:fake_factory_items_tests",
-    "//src/factory/fake_factory_store_providers:fake_factory_store_providers",
-    "//src/factory/fake_factory_store_providers:fake_factory_store_providers_tests",
-    "//src/fonts:font_provider_unit_tests",
-    "//src/fonts/char_set:char_set_tests",
-    "//src/fonts/font_info:font_info_tests",
-    "//src/fonts/manifest:manifest_tests",
-    "//src/fonts/offset_string:offset_string_tests",
-    "//src/fonts/tests/integration:font_provider_integration_tests",
-    "//src/fonts/tests/smoke:font_provider_smoke_tests",
-    "//src/graphics/bin/vulkan_loader:vulkan_loader",
-    "//src/graphics/drivers/misc/goldfish_address_space:goldfish_address_space",
-    "//src/graphics/drivers/misc/goldfish_control:goldfish_control",
-    "//src/graphics/drivers/msd-img-rgx/tests/integration:msd_img_rgx_integration_tests",
-    "//src/graphics/drivers/msd-img-rgx/tests/unit_tests:msd_img_rgx_nonhardware_tests",
-    "//src/graphics/lib/magma/gnbuild/magma-arm-mali:msd_arm_mali_nonhardware_tests",
-    "//src/graphics/lib/magma/gnbuild/magma-intel-gen:magma_intel_gen_integration_tests",
-    "//src/graphics/lib/magma/gnbuild/magma-intel-gen:magma_intel_gen_nonhardware_tests",
-    "//src/graphics/lib/magma/gnbuild/magma-vsi-vip:msd_vsi_vip_nonhardware_tests",
-    "//src/graphics/lib/magma/tests:magma_abi_conformance_tests",
-    "//src/graphics/lib/magma/tests:magma_info_test",
-    "//src/graphics/lib/magma/tests:magma_nonhardware_tests",
-    "//src/graphics/lib/magma/tests/icd_strip:test_icd_strip",
-    "//src/graphics/lib/magma/tests/unit_tests:testing_firmware_file",
-    "//src/graphics/tests/goldfish_benchmark:goldfish_benchmark",
-    "//src/graphics/tests/goldfish_test:goldfish_test",
-    "//src/hwinfo:fake_hwinfo",
-    "//src/hwinfo:hwinfo_negative_tests",
-    "//src/hwinfo:hwinfo_positive_tests",
-    "//src/identity/bin:identity_bin_unittests",
-    "//src/identity/bin/account_handler:account_handler",
-    "//src/identity/bin/account_manager:account_manager",
-    "//src/identity/bin/dev_auth_provider:dev_auth_provider",
-    "//src/identity/bin/dev_authenticator:dev_authenticator",
-    "//src/identity/bin/google_auth_provider:google_auth_provider",
-    "//src/identity/bin/token_manager_factory:token_manager_factory",
-    "//src/identity/lib:identity_lib_unittests",
-    "//src/identity/tests/account_manager_integration:account_manager_integration_test_pkg",
-    "//src/identity/tests/account_manager_integration:account_manager_prototype",
-    "//src/identity/tests/token_manager_integration:token_manager_integration_test_pkg",
-    "//src/identity/tools:identity_tools_unittests",
-    "//src/intl/intl_services:pkg",
-    "//src/intl/intl_services/tests:pkg",
-    "//src/lib/backoff:backoff_tests",
-    "//src/lib/by_addr:by_addr_tests",
-    "//src/lib/callback:callback_tests",
-    "//src/lib/cmx:cmx_fuzzers_pkg",
-    "//src/lib/cmx:cmx_tests",
-    "//src/lib/cmx/facet_parser:cmx_facet_parser_tests",
-    "//src/lib/cobalt:cobalt_lib_tests",
-    "//src/lib/containers/cpp:containers_cpp_tests",
-    "//src/lib/elflib:elflib_fuzzers_pkg",
-    "//src/lib/elflib:elflib_tests",
-    "//src/lib/error/clonable_error:clonable_error_tests",
-    "//src/lib/fake-clock/examples/go:fake_clock_gotests",
-    "//src/lib/fake-clock/lib:fake_clock_lib_test",
-    "//src/lib/fake-clock/svc:svc",
-    "//src/lib/fake-clock/svc:tests",
-    "//src/lib/fidl_codec:fidl_codec_tests",
-    "//src/lib/fidl_table_validation/fidl_table_validation_tests:fidl_table_validation_tests",
-    "//src/lib/fsl:fsl_tests",
-    "//src/lib/fuchsia-component/isolated_component_test:fuchsia_component_test_inner",
-    "//src/lib/fuchsia-component/isolated_component_test:fuchsia_component_test_middle",
-    "//src/lib/fuchsia-component/isolated_component_test:fuchsia_component_test_middle_sibling",
-    "//src/lib/fuchsia-component/isolated_component_test:fuchsia_component_test_outer",
-    "//src/lib/fxl:fxl_tests",
-    "//src/lib/icu_data/rust/icu_data:icu_data_rust_tests",
-    "//src/lib/inet:libinet_tests",
-    "//src/lib/intl/intl_property_provider_impl:intl_property_provider_impl_tests",
-    "//src/lib/intl/unicode_utils/char_collection:char_collection_tests",
-    "//src/lib/isolated_devmgr:isolated_devmgr_tests",
-    "//src/lib/json_parser:json_parser_unittests",
-    "//src/lib/line_input:line_input_tests",
-    "//src/lib/mpmc:mpmc_tests",
-    "//src/lib/network_wrapper:network_wrapper_tests",
-    "//src/lib/pkg_url:pkg_url_fuzzers_pkg",
-    "//src/lib/pkg_url:pkg_url_unittests",
-    "//src/lib/storage:vfs_tests",
-    "//src/lib/storage/fs_management:fs_management_tests",
-    "//src/lib/storage/ramdevice_client:ramdevice_client_tests",
-    "//src/lib/streammap:streammap_tests",
-    "//src/lib/test_executor/rust:test_executor_unittests",
-    "//src/lib/test_util:rust_test_util_tests",
-    "//src/lib/testing/loop_fixture:loop_fixture_tests",
-    "//src/lib/timekeeper:timekeeper_tests",
-    "//src/lib/ui/base_view:base_view_unittests",
-    "//src/lib/ui/fuchsia-framebuffer:fb_box_rs",
-    "//src/lib/ui/fuchsia-framebuffer:fb_integration_test",
-    "//src/lib/ui/input:input_unit_tests",
-    "//src/lib/uuid:uuid_tests",
-    "//src/lib/waitgroup:waitgroup_tests",
-    "//src/lib/zerocopy:zerocopy_tests",
-    "//src/media/audio:audio_fuzzers_pkg",
-    "//src/media/audio/audio/test:audio_loopback_tests",
-    "//src/media/audio/audio_core:audio_core",
-    "//src/media/audio/audio_core:audio_core_unittests",
-    "//src/media/audio/audio_core/mixer:audio_mixer_unittests",
-    "//src/media/audio/audio_core/mixer/test:audio_fidelity_tests",
-    "//src/media/audio/audio_core/test/audio_admin:audio_admin_tests",
-    "//src/media/audio/audio_core/test/device:audio_device_tests",
-    "//src/media/audio/audio_core/test/fidl:audio_fidl_tests",
-    "//src/media/audio/audio_core/test/hardware:audio_core_hardware_tests",
-    "//src/media/audio/audio_core/test/pipeline:audio_pipeline_tests",
-    "//src/media/audio/audio_core/test/service:audio_device_service_tests",
-    "//src/media/audio/drivers/test:audio_driver_tests",
-    "//src/media/audio/drivers/virtual_audio:virtual_audio",
-    "//src/media/audio/examples/effects:audio_effects_example_tests",
-    "//src/media/audio/examples/simple_sine:simple_sine",
-    "//src/media/audio/examples/sync_sine:sync_sine",
-    "//src/media/audio/lib/audio_test_devmgr:audio_test_devmgr",
-    "//src/media/audio/lib/effects_loader:audio_effects_loader_unittests",
-    "//src/media/audio/lib/wav_writer:audio_wav_writer_unittests",
-    "//src/media/audio/tools/signal_generator:signal_generator",
-    "//src/media/audio/tools/virtual_audio_util:virtual_audio_util",
-    "//src/media/audio/tools/wav_recorder:wav_recorder",
-    "//src/media/audio/virtual_audio_service:virtual_audio_service",
-    "//src/media/codec:codec_factory",
-    "//src/media/codec:codec_runner_sw_aac",
-    "//src/media/codec:codec_runner_sw_ffmpeg",
-    "//src/media/codec:codec_runner_sw_sbc",
-    "//src/media/codec/codecs/test:chunk_input_stream_tests",
-    "//src/media/codec/codecs/test:codec_adapter_sw_test",
-    "//src/media/codec/codecs/test:output_sink_tests",
-    "//src/media/codec/codecs/test:timestamp_extrapolator_tests",
-    "//src/media/codec/examples:h264_decoder_fuzzer_test",
-    "//src/media/codec/examples:use_aac_decoder_test",
-    "//src/media/codec/examples:use_h264_decoder_secure_input_output_test",
-    "//src/media/codec/examples:use_h264_decoder_secure_output_test",
-    "//src/media/codec/examples:use_h264_decoder_stream_switching_test",
-    "//src/media/codec/examples:use_h264_decoder_test",
-    "//src/media/codec/examples:use_media_decoder",
-    "//src/media/codec/examples:use_vp9_decoder_24_output_frames_test",
-    "//src/media/codec/examples:use_vp9_decoder_secure_input_output_test",
-    "//src/media/codec/examples:use_vp9_decoder_secure_output_test",
-    "//src/media/codec/examples:use_vp9_decoder_skip_first_frame_test",
-    "//src/media/codec/examples:use_vp9_decoder_test",
-    "//src/media/codec/examples:vp9_decoder_conformance_http",
-    "//src/media/codec/examples:vp9_decoder_fuzzer_test",
-    "//src/media/codec/factory:codec_detect_hw_decode_test",
-    "//src/media/codec/factory:codec_detect_hw_encode_test",
-    "//src/media/drivers/amlogic_decoder:amlogic_decoder_firmware",
-    "//src/media/drivers/amlogic_decoder:amlogic_decoder_package",
-    "//src/media/drivers/amlogic_decoder:amlogic_decoder_test_package",
-    "//src/media/drivers/amlogic_decoder:test_amlogic_codec_factory_package",
-    "//src/media/drivers/amlogic_decoder/tests/runner:amlogic_decoder_integration_tests",
-    "//src/media/drivers/amlogic_encoder:amlogic_encoder",
-    "//src/media/drivers/amlogic_encoder:amlogic_encoder_tests",
-    "//src/media/lib/codec_impl/unit_tests:codec_impl_unit_tests",
-    "//src/media/lib/codec_impl/unit_tests:decryptor_adapter_tests",
-    "//src/media/lib/mpsc_queue:mpsc_queue_unittests",
-    "//src/media/playback/examples/audio_player:audio_player",
-    "//src/media/playback/mediaplayer:mediaplayer_test_util",
-    "//src/media/playback/mediaplayer:mediaplayer_tests",
-    "//src/media/sessions:mediasession_tests",
-    "//src/media/sessions/examples:example_player",
-    "//src/media/sessions/mediasession_cli_tool:mediasession_cli_tool",
-    "//src/media/sounds/example:soundplayer_example",
-    "//src/media/sounds/soundplayer:soundplayer_tests",
-    "//src/media/stream_processors/test:audio_decoder_test",
-    "//src/media/stream_processors/test:audio_encoder_test",
-    "//src/media/stream_processors/test:h264_decoder_test",
-    "//src/media/stream_processors/test:h264_encoder_test",
-    "//src/media/stream_processors/test:stream_processor_lib_test",
-    "//src/media/stream_processors/test:video_frame_hasher_test",
-    "//src/modular:modular_lib_tests",
-    "//src/modular/bin/basemgr:auto_login_base_shell",
-    "//src/modular/bin/basemgr:auto_login_base_shell_test",
-    "//src/modular/bin/basemgr:basemgr_unittests",
-    "//src/modular/bin/basemgr:dev_base_shell",
-    "//src/modular/bin/basemgr_launcher:basemgr_launcher",
-    "//src/modular/bin/basemgr_launcher:basemgr_launcher_tests",
-    "//src/modular/bin/modular_test_harness:modular_test_harness",
-    "//src/modular/bin/modular_test_harness:tests",
-    "//src/modular/bin/sessionctl:sessionctl_unittests",
-    "//src/modular/bin/sessionmgr:dev_session_shell",
-    "//src/modular/bin/sessionmgr:sessionmgr_unittests",
-    "//src/modular/bin/sessionmgr/agent_runner:agent_runner_unittests",
-    "//src/modular/bin/sessionmgr/puppet_master:puppet_master_unittests",
-    "//src/modular/bin/sessionmgr/storage:storage_unittests",
-    "//src/modular/bin/sessionmgr/story/model:story_model_unittests",
-    "//src/modular/bin/sessionmgr/story_runner:dev_story_shell",
-    "//src/modular/bin/sessionmgr/story_runner:story_runner_unittests",
-    "//src/modular/lib/modular_test_harness/cpp:test_harness_fixture_test",
-    "//src/modular/tests:modular_integration_tests",
-    "//src/modular/tests:module_with_fake_runner",
-    "//src/recovery/factory_reset:factory_reset_pkg",
-    "//src/recovery/factory_reset:factory_reset_tests",
-    "//src/recovery/factory_reset:fdr_cli",
-    "//src/recovery/integration:recovery_integration_test",
-    "//src/recovery/system:system_recovery",
-    "//src/recovery/system:system_recovery_tests",
-    "//src/security/kms:kms_tests",
-    "//src/security/root_ssl_certificates:root_ssl_certificates",
-    "//src/security/tee_manager:optee_smoke_test",
-    "//src/security/tee_manager:optee_test",
-    "//src/security/tee_manager:tee_manager",
-    "//src/session/bin/component_manager:component_manager_sfw",
-    "//src/session/bin/session_manager:session_manager",
-    "//src/session/bin/session_manager:session_manager_tests",
-    "//src/session/examples/elements/element_proposer:element_proposer",
-    "//src/session/examples/elements/element_proposer:element_proposer_tests",
-    "//src/session/examples/elements/element_session:element_session",
-    "//src/session/examples/elements/element_session:element_session_tests",
-    "//src/session/examples/elements/simple_element:simple_element",
-    "//src/session/examples/elements/simple_element:simple_element_tests",
-    "//src/session/examples/graphical_session:graphical_session",
-    "//src/session/examples/graphical_session:graphical_session_tests",
-    "//src/session/examples/input_session:input_session",
-    "//src/session/examples/input_session:input_session_tests",
-    "//src/session/examples/scene_manager_session:scene_manager_session",
-    "//src/session/examples/scene_manager_session:scene_manager_session_tests",
-    "//src/session/lib/element_management:element_management_tests",
-    "//src/session/lib/input:input_tests",
-    "//src/session/lib/realm_management:realm_management_tests",
-    "//src/session/lib/scene_management:scene_management_tests",
-    "//src/session/tests/session_manager:session_manager_integration_tests",
-    "//src/session/tools/session_control:session_control",
-    "//src/session/tools/session_control:session_control_tests",
-    "//src/speech/tts:tts_service",
-    "//src/storage/ext4/server:ext4_readonly",
-    "//src/storage/ext4/server:ext4_server_integration_tests",
-    "//src/storage/ext4/server:ext4_server_tests",
-    "//src/sys/appmgr:appmgr_unittests",
-    "//src/sys/appmgr:system_data_file",
-    "//src/sys/appmgr/integration_tests:appmgr_integration_tests",
-    "//src/sys/appmgr/integration_tests/component_events:component_events_integration_tests",
-    "//src/sys/appmgr/integration_tests/components:components_binary_tests",
-    "//src/sys/appmgr/integration_tests/inspect:inspect_integration_tests",
-    "//src/sys/appmgr/integration_tests/inspect:inspect_vmo_integration_tests",
-    "//src/sys/appmgr/integration_tests/logs:appmgr_log_integration_tests",
-    "//src/sys/appmgr/integration_tests/mock_runner:appmgr_mock_runner",
-    "//src/sys/appmgr/integration_tests/mock_runner:fake_component_for_runner",
-    "//src/sys/appmgr/integration_tests/outdir:appmgr_out_dir_integration_test",
-    "//src/sys/appmgr/integration_tests/sandbox:has_pkg",
-    "//src/sys/appmgr/integration_tests/sandbox/features/ambient-executable-policy:has_ambient_executable",
-    "//src/sys/appmgr/integration_tests/sandbox/features/ambient-executable-policy:no_ambient_executable",
-    "//src/sys/appmgr/integration_tests/sandbox/features/ambient-executable-policy:no_ambient_executable_with_features",
-    "//src/sys/appmgr/integration_tests/sandbox/features/ambient-executable-policy:no_ambient_executable_with_sandbox",
-    "//src/sys/appmgr/integration_tests/sandbox/features/build-info:build_info_tests",
-    "//src/sys/appmgr/integration_tests/sandbox/features/global-data:has_global_data",
-    "//src/sys/appmgr/integration_tests/sandbox/features/global-data:no_global_data",
-    "//src/sys/appmgr/integration_tests/sandbox/features/global-data:restricted_global_data",
-    "//src/sys/appmgr/integration_tests/sandbox/features/hub:has_hub",
-    "//src/sys/appmgr/integration_tests/sandbox/features/hub:has_hub_deprecated_shell",
-    "//src/sys/appmgr/integration_tests/sandbox/features/hub:no_hub",
-    "//src/sys/appmgr/integration_tests/sandbox/features/isolated-temp:has_isolated_temp",
-    "//src/sys/appmgr/integration_tests/sandbox/features/isolated-temp:no_isolated_temp",
-    "//src/sys/appmgr/integration_tests/sandbox/features/shell:has_deprecated_shell",
-    "//src/sys/appmgr/integration_tests/sandbox/features/shell:no_shell",
-    "//src/sys/appmgr/integration_tests/sandbox/features/shell-commands:has_shell_commands",
-    "//src/sys/appmgr/integration_tests/sandbox/features/shell-commands:no_shell_commands",
-    "//src/sys/appmgr/integration_tests/sandbox/features/storage:has_isolated_cache_storage",
-    "//src/sys/appmgr/integration_tests/sandbox/features/storage:has_isolated_persistent_storage",
-    "//src/sys/appmgr/integration_tests/sandbox/features/storage:isolated_persistent_storage",
-    "//src/sys/appmgr/integration_tests/sandbox/features/storage:no_persistent_storage",
-    "//src/sys/appmgr/integration_tests/sandbox/features/system-temp:no_system_temp",
-    "//src/sys/appmgr/integration_tests/sandbox/services:some_services",
-    "//src/sys/appmgr/integration_tests/util:persistent_storage_test_util",
-    "//src/sys/component_index:component_index",
-    "//src/sys/component_index:component_index_tests",
-    "//src/sys/component_manager/src/elf_runner/tests:elf_runner_lifecycle_test",
-    "//src/sys/component_manager/tests/base_resolver_test:base_resolver_test",
-    "//src/sys/component_manager/tests/collections:collections_integration_test",
-    "//src/sys/component_manager/tests/destruction:destruction_integration_test",
-    "//src/sys/component_manager/tests/events:events_integration_test",
-    "//src/sys/component_manager/tests/hub:hub_integration_test",
-    "//src/sys/component_manager/tests/panic-test:component_manager_panic_test",
-    "//src/sys/component_manager/tests/rights:rights_integration_test",
-    "//src/sys/component_manager/tests/routing:routing_integration_test",
-    "//src/sys/component_manager/tests/shutdown:shutdown_integration_test",
-    "//src/sys/component_manager/tests/storage:storage_integration_test",
-    "//src/sys/component_manager/tests/work_scheduler:work_scheduler_integration_test",
-    "//src/sys/component_manager_for_test:component_manager_for_test_pkg",
-    "//src/sys/component_manager_for_test/tests:component_manager_for_test_integration_test",
-    "//src/sys/component_test_runner:component_test_runner",
-    "//src/sys/component_test_runner:component_test_runner_tests",
-    "//src/sys/dash:dash_test",
-    "//src/sys/device_settings:device_settings_manager",
-    "//src/sys/device_settings:device_settings_manager_tests",
-    "//src/sys/gtest_v1_runner:gtest_v1_runner",
-    "//src/sys/gtest_v1_runner:gtest_v1_runner_unittests",
-    "//src/sys/gtest_v1_runner/tests:gtest_v1_runner_test",
-    "//src/sys/lib/cm_fidl_translator:cm_fidl_translator_tests",
-    "//src/sys/lib/cm_fidl_validator:cm_fidl_validator_tests",
-    "//src/sys/lib/cm_json:cm_json_tests",
-    "//src/sys/lib/cm_rust:cm_rust_tests",
-    "//src/sys/lib/cm_types:cm_types_tests",
-    "//src/sys/lib/fuchsia_backoff:fuchsia_backoff_tests",
-    "//src/sys/lib/library_loader:library_loader_tests",
-    "//src/sys/lib/runner:runner_tests",
-    "//src/sys/locate:locate_integration_test",
-    "//src/sys/pkg/bin/amber:amber_tests",
-    "//src/sys/pkg/bin/amber/system_updater:system_updater_tests",
-    "//src/sys/pkg/bin/pkgfs:pmd_tests",
-    "//src/sys/run_test_suite:run_test_suite_pkg",
-    "//src/sys/run_test_suite/tests:run_test_suite_integration_tests",
-    "//src/sys/sysmgr/integration_tests:integration_tests",
-    "//src/sys/sysmgr/test:sysmgr_tests",
-    "//src/sys/test_manager:test_manager_pkg",
-    "//src/sys/test_manager/tests:test_manager_test",
-    "//src/sys/test_runners/gtest:gtest_runner",
-    "//src/sys/test_runners/gtest:gtest_runner_tests",
-    "//src/sys/tools/chrealm/integration_tests:chrealm_integrationtests",
-    "//src/sys/tools/chrealm/integration_tests:chrealm_test_get_message",
-    "//src/sys/tools/cs2/tests:cs2_test",
-    "//src/sys/tools/run/test:run_test_exiter",
-    "//src/sys/tools/run/test:run_tests",
-    "//src/sys/tools/stash_ctl:stash_ctl",
-    "//src/sys/tools/stash_ctl:stash_ctl_tests",
-    "//src/testing/loadbench:loadbench_tests",
-    "//src/testing/sl4f/tests/test_framework:sl4f_test_integration_tests",
-    "//src/tests/benchmarks:fuchsia_benchmarks",
-    "//src/tests/fidl/compatibility:fidl_compatibility_test",
-    "//src/tests/fidl/compatibility:fidl_compatibility_test_server_cpp",
-    "//src/tests/fidl/compatibility:fidl_compatibility_test_server_llcpp",
-    "//src/tests/fidl/compatibility:fidl_compatibility_test_server_rust",
-    "//src/tests/fidl/compatibility/golang:fidl_compatibility_test_server_go",
-    "//src/tests/fidl/fidl_go_conformance:fidl_go_conformance",
-    "//src/tests/microbenchmarks:fuchsia_microbenchmarks_perftestmode",
-    "//src/tests/microbenchmarks:fuchsia_microbenchmarks_unittestmode",
-    "//src/ui/a11y/bin/a11y_manager:a11y_manager",
-    "//src/ui/a11y/bin/a11y_manager/tests:a11y_tests",
-    "//src/ui/a11y/bin/a11y_tts_log_engine:a11y_tts_log_engine",
-    "//src/ui/a11y/bin/a11y_tts_log_engine/tests:a11y_tts_log_engine_tests",
-    "//src/ui/a11y/lib:a11y_lib_tests",
-    "//src/ui/bin/brightness_manager:brightness_manager",
-    "//src/ui/bin/brightness_manager:brightness_manager_tests",
-    "//src/ui/bin/default_hardware_ime:default_hardware_ime",
-    "//src/ui/bin/default_hardware_ime:default_hardware_ime_tests",
-    "//src/ui/bin/headless_root_presenter:headless_root_presenter",
-    "//src/ui/bin/headless_root_presenter/tests:headless_root_presenter_tests",
-    "//src/ui/bin/ime:ime_service",
-    "//src/ui/bin/ime:ime_test",
-    "//src/ui/bin/ime:keyboard_test",
-    "//src/ui/bin/root_presenter:root_presenter",
-    "//src/ui/bin/root_presenter/tests:root_presenter_tests",
-    "//src/ui/bin/shortcut:shortcut_test",
-    "//src/ui/examples:bouncing_ball",
-    "//src/ui/examples:direct_input",
-    "//src/ui/examples:direct_input_child",
-    "//src/ui/examples:pose_buffer_presenter",
-    "//src/ui/examples:scenic_dev_app",
-    "//src/ui/examples:shadertoy_client",
-    "//src/ui/examples:shadertoy_service",
-    "//src/ui/examples:simplest_app",
-    "//src/ui/examples:simplest_embedder",
-    "//src/ui/examples:spinning_square_view",
-    "//src/ui/examples:standalone_app",
-    "//src/ui/examples:tile_view",
-    "//src/ui/examples:yuv_to_image_pipe",
-    "//src/ui/examples/escher:escher_rainfall",
-    "//src/ui/examples/escher:escher_waterfall",
-    "//src/ui/lib/escher:escher_tests",
-    "//src/ui/lib/input_reader/tests:input_reader_tests",
-    "//src/ui/lib/key_util:test",
-    "//src/ui/scenic:scenic_tests",
-    "//src/ui/scenic/lib/gfx/tests:mock_pose_buffer_provider",
-    "//src/ui/scenic/lib/input/tests/fuzzers:input_fuzzers_pkg",
-    "//src/ui/scenic/lib/scenic/util/tests:scenic_util_tests_pkg",
-    "//src/ui/scenic/lib/utils:tests",
-    "//src/ui/testing/text:text_test_suite",
-    "//src/ui/testing/text:text_test_suite_tests",
-    "//src/ui/testing/views:test_views",
-    "//src/ui/tests/e2e_input_tests/scenic:e2e_input_tests",
-    "//src/ui/tools:gltf_export",
-    "//src/ui/tools:present_view",
-    "//src/ui/tools:print_input",
-    "//src/ui/tools:set_renderer_params",
-    "//src/ui/tools/present_view/tests:present_view_tests",
-    "//src/ui/tools/tiles:tiles_tests",
-    "//src/ui/tools/tiles_ctl:tiles_ctl",
-    "//src/virtualization/bin/guest_manager:guest_manager",
-    "//src/virtualization/bin/guest_manager:guest_manager_tests",
-    "//src/virtualization/bin/guest_runner:guest_runner",
-    "//src/virtualization/bin/vmm:vmm_tests",
-    "//src/virtualization/bin/vmm/device:virtio_balloon",
-    "//src/virtualization/bin/vmm/device:virtio_block",
-    "//src/virtualization/bin/vmm/device:virtio_console",
-    "//src/virtualization/bin/vmm/device:virtio_gpu",
-    "//src/virtualization/bin/vmm/device:virtio_input",
-    "//src/virtualization/bin/vmm/device:virtio_magma",
-    "//src/virtualization/bin/vmm/device:virtio_magma_mock_system",
-    "//src/virtualization/bin/vmm/device:virtio_net",
-    "//src/virtualization/bin/vmm/device:virtio_rng",
-    "//src/virtualization/bin/vmm/device:virtio_wl",
-    "//src/virtualization/lib/grpc:grpc_vsock_tests",
-    "//src/virtualization/lib/guest_interaction:guest_discovery_service",
-    "//src/virtualization/lib/guest_interaction:guest_interaction_tests",
-    "//src/virtualization/lib/vsh:vsh_tests",
-    "//src/virtualization/packages/biscotti_guest:biscotti_guest",
-    "//src/virtualization/packages/biscotti_guest/linux_runner:linux_runner",
-    "//src/virtualization/packages/biscotti_guest/linux_runner:linux_runner_tests",
-    "//src/virtualization/packages/debian_guest:debian_guest",
-    "//src/virtualization/packages/linux_guest:linux_guest",
-    "//src/virtualization/packages/termina_guest:termina_guest",
-    "//src/virtualization/packages/zircon_guest:zircon_guest",
-    "//src/virtualization/tests:guest_integration_tests",
-    "//src/virtualization/tests:guest_integration_tests_utils",
-    "//src/virtualization/tests:guest_unit_tests",
-    "//src/virtualization/tests:termina_integration_tests",
-    "//third_party/boringssl:boringssl_fuzzers_pkg",
-    "//third_party/boringssl:boringssl_tests",
-    "//third_party/boringssl:boringssl_tool",
-    "//third_party/cobalt:cobalt_core_tests",
-    "//third_party/leveldb:leveldb_benchmarks",
-    "//third_party/leveldb:leveldb_tests",
-    "//third_party/openssl-ecjpake:openssl-ecjpake_tests",
-    "//third_party/quickjs:qjs_tests",
-    "//third_party/re2:re2_tests",
-    "//tools/cmc:cmc_integration_test",
-    "//tools/fidlcat/tests:fidlcat_tests",
-    "//topaz/bin/crasher_dart:crasher_dart_pkg",
-    "//topaz/bin/crasher_flutter:crasher_flutter_pkg",
-    "//topaz/bin/fidl_bindings_test/server:fidl_bindings_test_server_pkg",
-    "//topaz/bin/fidl_bindings_test/test:fidl_bindings_test_pkg",
-    "//topaz/bin/fidl_compatibility_test:fidl_compatibility_test_topaz",
-    "//topaz/bin/fidl_compatibility_test/dart:fidl_dart_compatibility_test_server_pkg",
-    "//topaz/bin/flutter_screencap_test:flutter_screencap_test_app_pkg",
-    "//topaz/bin/flutter_screencap_test:flutter_screencap_test_pkg",
-    "//topaz/bin/ui/benchmarks/image_grid_flutter:image_grid_flutter_pkg",
-    "//topaz/lib/story_shell/examples/example_manual_relationships:example_manual_relationships_pkg",
-    "//topaz/lib/story_shell/examples/story_shell_test:key_listener_device_tests_pkg",
-    "//topaz/public/dart-pkg/zircon:dart_zircon_test_pkg",
-    "//topaz/public/dart/fuchsia_inspect/codelab/client:bin_pkg",
-    "//topaz/public/dart/fuchsia_inspect/codelab/fizzbuzz:bin_pkg",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_1:bin_pkg",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_1:inspect_dart_codelab_part_1_integration_tests_pkg",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_2:bin_pkg",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_2:inspect_dart_codelab_part_2_integration_tests_pkg",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_3:bin_pkg",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_3:inspect_dart_codelab_part_3_integration_tests_pkg",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_4:bin_pkg",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_4:inspect_dart_codelab_part_4_integration_tests_pkg",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_5:bin_pkg",
-    "//topaz/public/dart/fuchsia_inspect/codelab/part_5:inspect_dart_codelab_part_5_integration_tests_pkg",
-    "//topaz/public/dart/fuchsia_inspect/examples/inspect_mod:inspect_mod_pkg",
-    "//topaz/public/dart/fuchsia_inspect/examples/inspect_mod:inspect_mod_test_pkg",
-    "//topaz/public/dart/fuchsia_inspect/test/inspect_flutter_integration_tester:inspect_dart_integration_test_driver_pkg",
-    "//topaz/public/dart/fuchsia_inspect/test/inspect_flutter_integration_tester:inspect_flutter_integration_tester_pkg",
-    "//topaz/public/dart/fuchsia_inspect/test/integration:dart_inspect_vmo_test",
-    "//topaz/public/dart/fuchsia_inspect/test/integration:dart_inspect_vmo_test_writer_pkg",
-    "//topaz/public/dart/fuchsia_inspect_flutter/examples/torus15:torus15_dart_test_pkg",
-    "//topaz/public/dart/fuchsia_inspect_flutter/examples/torus15:torus15_flutter_pkg",
-    "//topaz/public/dart/fuchsia_modular:fuchsia_modular_package_integration_tests_pkg",
-    "//topaz/public/dart/fuchsia_modular/examples/fibonacci_agent:fibonacci_agent_pkg",
-    "//topaz/public/dart/fuchsia_modular/examples/multilevel_mod:multilevel_mod_pkg",
-    "//topaz/public/dart/fuchsia_modular/examples/shapes_mod:shapes_mod_pkg",
-    "//topaz/public/dart/fuchsia_modular/examples/slider_mod:slider_mod_pkg",
-    "//topaz/public/dart/fuchsia_modular/examples/slider_mod:slider_mod_tests_pkg",
-    "//topaz/public/dart/fuchsia_modular_testing:fuchsia_modular_testing_package_integration_tests_pkg",
-    "//topaz/public/dart/fuchsia_services:fuchsia_services_foo_test_server_pkg",
-    "//topaz/public/dart/fuchsia_services:fuchsia_services_package_integration_tests_pkg",
-    get_label_info(dart_package_label.fuchsia_vfs, "dir") +
-        ":fuchsia_vfs_package_unittests_pkg",
-    "//topaz/public/dart/fuchsia_webview_flutter/examples/webview_mod:webview_mod_pkg",
-    "//topaz/shell/story_shell_labs:story_shell_labs_pkg",
-    "//topaz/tests/dart_fidl_benchmarks:dart_fidl_benchmarks_pkg",
-  ]
-}
diff --git a/build/component/component_id_index.gni b/build/component/component_id_index.gni
index 7f33e1c..d514b9c 100644
--- a/build/component/component_id_index.gni
+++ b/build/component/component_id_index.gni
@@ -38,12 +38,14 @@
   }
 }
 
-# Produce a config_data() for appmgr containing a merged component ID index.
-# All component ID indices are merged together by transitively traversing
-# `deps`, and a single merged index is supplied to the config_data().
+# Collect and merge component ID indices supplied from component_id_index()s.
 #
-# A target defined from this template must be a (transitive) dependency of
-# config_package().
+# This template results in a resource() which contains a FIDL-wireformat
+# encoded index. A "-config-data" suffixed sub-target contains a JSON-encoded
+# index supplied through config_data().
+#
+# A target defined from this template must take a (transitive) dependency on
+# targets defined from the component_id_index() template.
 #
 # For more details on how to include this target in a system assembly, see:
 # //docs/development/components/component_id_index.md
@@ -57,7 +59,7 @@
 #   public_deps (optional)
 #   testonly (optional)
 #     Usual GN meanings.
-template("component_id_index_config_data") {
+template("component_id_index_config") {
   # Step 1:  Collect a list of index file paths into intermediary file
   # `component_id_index_merge_input_manifest`.
   component_id_index_input_manifest_path =
@@ -69,9 +71,8 @@
                              "deps",
                              "public_deps",
                              "testonly",
-                             "visibility",
                            ])
-
+    visibility = [ ":*" ]
     data_keys = [ "component_id_index" ]
     outputs = [ component_id_index_input_manifest_path ]
   }
@@ -79,32 +80,60 @@
   # Step 2: Merge the collected index files (listed in the intermediary file)
   # into 1 index.
   component_id_index_merge = "${target_name}_merge"
-  component_id_index_merged_index_path =
+  component_id_index_merged_index_json_path =
       "${target_out_dir}/${target_name}_merged_index.json"
+  component_id_index_merged_index_fidl_path =
+      "${target_out_dir}/${target_name}_merged_index.fidlbin"
+
   compiled_action(component_id_index_merge) {
     forward_variables_from(invoker, [ "testonly" ])
+    visibility = [ ":*" ]
     tool = "//tools/component_id_index"
     sources = [ component_id_index_input_manifest_path ]
-    outputs = [ component_id_index_merged_index_path ]
+    outputs = [
+      component_id_index_merged_index_json_path,
+      component_id_index_merged_index_fidl_path,
+    ]
     depfile = "${target_out_dir}/${target_name}.d"
     args = [
       "--input_manifest",
-      rebase_path(component_id_index_input_manifest_path),
-      "--output_file",
-      rebase_path(component_id_index_merged_index_path),
+      rebase_path(component_id_index_input_manifest_path, root_build_dir),
+      "--output_index_json",
+      rebase_path(component_id_index_merged_index_json_path, root_build_dir),
+      "--output_index_fidl",
+      rebase_path(component_id_index_merged_index_fidl_path, root_build_dir),
       "--depfile",
       rebase_path(depfile, root_build_dir),
     ]
     deps = [ ":${component_id_index_merged_inputs}" ]
   }
 
-  # Step 3: Supply the merged index file to appmgr as config data.
-  config_data(target_name) {
-    forward_variables_from(invoker, [ "testonly" ])
+  # Supply the merged index file to appmgr as config data.
+  config_data("${target_name}-config-data") {
+    forward_variables_from(invoker,
+                           [
+                             "testonly",
+                             "visibility",
+                           ])
     for_pkg = "appmgr"
-    sources = [ rebase_path(component_id_index_merged_index_path) ]
+    sources = [ component_id_index_merged_index_json_path ]
     outputs = [ "component_id_index" ]
 
     deps = [ ":${component_id_index_merge}" ]
   }
+
+  # This target produces the component_id_index as a resource pathed under
+  # "config/component_id_index".  This resource can be used to place the index
+  # in bootfs so that it is available to `component_manager` even before pkgfs
+  # is available.
+  resource("${target_name}") {
+    forward_variables_from(invoker,
+                           [
+                             "testonly",
+                             "visibility",
+                           ])
+    sources = [ component_id_index_merged_index_fidl_path ]
+    outputs = [ "config/component_id_index" ]
+    deps = [ ":${component_id_index_merge}" ]
+  }
 }
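
The renamed template can be sketched in use as follows (target and dependency
labels here are hypothetical; the real assembly wiring is documented in
//docs/development/components/component_id_index.md):

    component_id_index_config("component_id_index_config") {
      # component_id_index() targets reachable through deps contribute entries.
      deps = [ "//src/example:my_component_id_index" ]
    }

A system assembly would then depend on ":component_id_index_config" for the
FIDL-encoded index resource placed in bootfs, and on
":component_id_index_config-config-data" for the JSON index delivered to
appmgr through config_data().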
diff --git a/build/config/BUILD.gn b/build/config/BUILD.gn
index e211039..4024787 100644
--- a/build/config/BUILD.gn
+++ b/build/config/BUILD.gn
@@ -12,6 +12,7 @@
 import("//build/toolchain/concurrent_jobs.gni")
 import("//build/toolchain/goma.gni")
 import("//build/unification/global_variables.gni")
+import("//zircon/public/gn/config/experimental_cxx_version.gni")
 
 declare_args() {
   if (is_fuchsia) {
@@ -97,7 +98,7 @@
 
 config("language") {
   cflags_c = [ "-std=c11" ]
-  cflags_cc = [ "-std=c++17" ]
+  cflags_cc = [ "-std=c++$experimental_cxx_version" ]
   if (current_os == "mac") {
     # macOS needs this to not complain about C++17isms that older macOS
     # system libc++ doesn't support.  But we use our own toolchain's static
@@ -609,7 +610,6 @@
     "//garnet/bin/run_test_component/*",
     "//garnet/bin/sched/*",
     "//garnet/bin/test_runner/report_result/*",
-    "//garnet/bin/thermd/*",
     "//garnet/bin/time/*",
     "//garnet/bin/trace/*",
     "//garnet/bin/trace2json/*",
@@ -667,12 +667,6 @@
     "//src/developer/debug/zxdb/console/*",
     "//src/developer/debug/zxdb/expr/*",
     "//src/developer/debug/zxdb/symbols/*",
-    "//src/developer/forensics/crash_reports/*",
-    "//src/developer/forensics/exceptions/limbo_client/*",
-    "//src/developer/forensics/feedback_data/*",
-    "//src/developer/forensics/last_reboot/*",
-    "//src/developer/forensics/testing/stubs/*",
-    "//src/developer/forensics/utils/*",
     "//src/developer/memory/mem/*",
     "//src/developer/memory/metrics/*",
     "//src/developer/memory/metrics/tests/*",
@@ -702,6 +696,7 @@
     "//src/lib/elflib/*",
     "//src/lib/fidl_codec/*",
     "//src/lib/files/*",
+    "//src/lib/framebuffer/*",
     "//src/lib/fsl/socket/*",
     "//src/lib/fsl/vmo/*",
     "//src/lib/fxl/*",
@@ -757,10 +752,12 @@
     "//src/modular/lib/async/cpp/*",
     "//src/modular/lib/fidl/*",
     "//src/modular/lib/integration_testing/cpp/*",
+    "//src/power/thermd/*",
     "//src/security/tee_manager/*",
     "//src/storage/bin/dd/*",
     "//src/storage/blobfs/*",
     "//src/storage/blobfs/test/*",
+    "//src/storage/fvm/*",
     "//src/storage/minfs/*",
     "//src/storage/volume_image/ftl/*",
     "//src/storage/volume_image/utils/*",
@@ -816,11 +813,9 @@
     "//zircon/system/ulib/driver-info/*",
     "//zircon/system/ulib/edid/*",
     "//zircon/system/ulib/elf-search/*",
-    "//zircon/system/ulib/framebuffer/*",
     "//zircon/system/ulib/fs-host/*",
     "//zircon/system/ulib/fs/*",
     "//zircon/system/ulib/ftl/*",
-    "//zircon/system/ulib/fvm/*",
     "//zircon/system/ulib/fzl/*",
     "//zircon/system/ulib/hid-parser/*",
     "//zircon/system/ulib/hid/*",
@@ -855,7 +850,6 @@
     "//zircon/system/utest/msd/*",
     "//zircon/third_party/*",
     "//zircon/tools/blobfs/*",
-    "//zircon/tools/fvm/*",
     "//zircon/tools/ktrace-dump/*",
     "//zircon/tools/lz4/*",
     "//zircon/tools/merkleroot/*",
diff --git a/build/config/BUILDCONFIG.gn b/build/config/BUILDCONFIG.gn
index d6b46a4..bcdd2da 100644
--- a/build/config/BUILDCONFIG.gn
+++ b/build/config/BUILDCONFIG.gn
@@ -1559,6 +1559,10 @@
                "target_type",
              ])
 
+  # IMPORTANT: The computations below are duplicated in several other
+  # files. Please keep them in sync, see:
+  #   //zircon/system/ulib/c/libc_toolchain.gni
+  #   //zircon/system/ulib/zircon/vdso_toolchain.gni
   target_variant = false
   if (select_variant_canonical != []) {
     # See if there is a selector that matches this target.
@@ -1708,15 +1712,18 @@
           data_deps = []
         }
         if (zircon_toolchain == false) {
-          # TODO(60613): Remove line below once sysroot generation moves to the Fuchsia build.
-          data_deps += [ "//build/unification/lib/sysroot" ]
           if (defined(crate_root)) {
             deps += [ "//zircon/public/sysroot:rust_binary_deps" ]
           } else {
             deps += [ "//zircon/public/sysroot:cpp_binary_deps" ]
-            if (target_type == "executable" || target_type == "test") {
-              deps += [ "//zircon/public/sysroot:crt1_deps" ]
-            }
+          }
+
+          # Add dependency to the C runtime startup object file for executables,
+          # except if the static-pie-config is used.
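+          # (`configs + [ label ] - [ label ] == configs` holds only when the
+          # label is not already present, so this adds crt1_deps only when the
+          # static-pie-config is absent.)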
+          if ((target_type == "executable" || target_type == "test") &&
+              configs + [ "//build/config/fuchsia:static-pie-config" ] -
+              [ "//build/config/fuchsia:static-pie-config" ] == configs) {
+            deps += [ "//zircon/public/sysroot:crt1_deps" ]
           }
         }
 
diff --git a/build/config/fuchsia/BUILD.gn b/build/config/fuchsia/BUILD.gn
index 06c5b56..c870de5 100644
--- a/build/config/fuchsia/BUILD.gn
+++ b/build/config/fuchsia/BUILD.gn
@@ -4,11 +4,12 @@
 
 import("//build/config/build_id.gni")
 import("//build/config/clang/clang.gni")
-import("//build/config/fuchsia/zircon_legacy_vars.gni")
+import("//build/config/sysroot.gni")
 import("//build/rust/config.gni")
 import("//build/toolchain/ccache.gni")
 import(
     "//zircon/public/gn/config/instrumentation/sanitizer_default_options.gni")
+import("//zircon/public/sysroot/rust.gni")
 
 assert(current_os == "fuchsia")
 
@@ -55,84 +56,24 @@
 
   rustflags = [
     "-L",
-    rebase_path(sysroot, root_build_dir) + "/lib",
+    rebase_path(rust_sysroot_dir, root_build_dir) + "/lib",
     "-Clinker=" + rebase_path("$clang_prefix/lld", "", root_build_dir),
-    "-Clink-arg=--sysroot=" + rebase_path(sysroot, root_build_dir),
-    "-Clink-arg=-L" + rebase_path(sysroot, root_build_dir) + "/lib",
+    "-Clink-arg=--sysroot=" + rebase_path(rust_sysroot_dir, root_build_dir),
     "-Clink-arg=-L" + clang_resource_dir + "/" + rust_target + "/lib",
     "-Clink-arg=--pack-dyn-relocs=relr",
     "-Clink-arg=-dynamic-linker=ld.so.1",
     "-Clink-arg=--icf=all",
   ]
 
-  lib_dirs = [
-    # These libraries are required by Rust's libstd.
-    "$root_build_dir/gen/sdk/lib/fdio",
-    "$root_build_dir/gen/zircon/system/ulib/trace-engine",
-    "$root_build_dir/gen/zircon/system/ulib/syslog",
-  ]
-
   if (build_id_format != "") {
     ldflags += [ "-Wl,--build-id=$build_id_format" ]
   }
 }
 
 config("compiler_sysroot") {
-  # Rather than using --sysroot and populating a sysroot per se, use
-  # specific compiler switches to find the C library and its headers from
-  # the Zircon build and source tree directly.
-  sysroot_include_dirs = []
-  foreach(entry, zircon_legacy_sysroot) {
-    if (defined(entry.include_dirs)) {
-      sysroot_include_dirs += entry.include_dirs
-    } else if (defined(entry.libc)) {
-      sysroot_libc = entry.libc
-    } else if (defined(entry.crt1)) {
-      sysroot_crt1 = entry.crt1
-    } else if (defined(entry.vdso)) {
-      sysroot_vdso = entry.vdso
-    }
-  }
-
-  # Point the preprocessor at the include directories.  Use -idirafter
-  # so they come in the same place in the search order as the --sysroot
-  # include directory would: after the compiler-supplied headers,
-  # allowing those to override and wrap libc headers via #include_next.
-  cflags = []
-  foreach(dir, sysroot_include_dirs) {
-    cflags += [
-      "-idirafter",
-      rebase_path(dir, root_build_dir, zircon_root_build_dir),
-    ]
-  }
+  cflags = [ "--sysroot=" + rebase_path(sysroot, root_build_dir) ]
   asmflags = cflags
-
-  # Point the linker at a little directory we populate below.  Plain -L
-  # switches (via lib_dirs) would be sufficient for the implicit -lc and -lm
-  # from the compiler driver.  But Scrt1.o is found only in the sysroot.
-  ldflags = [ "--sysroot=" + rebase_path(target_gen_dir, root_build_dir) ]
-
-  # Use input linker scripts found in the dummy sysroot to redirect to the
-  # actual Zircon binaries.  Because of this indirection (and the linker's
-  # lack of depfile support), the build system doesn't know about these
-  # dependencies.  So list them as inputs of everything to force re-links
-  # when they change.  This forces recompiles too since this config()
-  # applies to all compilation targets and not just linking ones, but this
-  # code only changes when touching core Zircon library sources.
-  libc = rebase_path(sysroot_libc, "", zircon_root_build_dir)
-  crt1 = rebase_path(sysroot_crt1, "", zircon_root_build_dir)
-  vdso = rebase_path(sysroot_vdso, "", zircon_root_build_dir)
-  inputs = [
-    libc,
-    crt1,
-    vdso,
-  ]
-  write_file("$target_gen_dir/lib/Scrt1.o", [ "INPUT(${crt1})" ])
-  write_file("$target_gen_dir/lib/libc.so", [ "INPUT(${libc})" ])
-  write_file("$target_gen_dir/lib/libdl.so", [ "/* dummy */" ])
-  write_file("$target_gen_dir/lib/libm.so", [ "/* dummy */" ])
-  write_file("$target_gen_dir/lib/libpthread.so", [ "/* dummy */" ])
-  write_file("$target_gen_dir/lib/libzircon.so", [ "INPUT(${vdso})" ])
+  ldflags = cflags
 }
 
 config("compiler_target") {
@@ -239,6 +180,19 @@
   libs = [ clang_libunwind ]
 }
 
+# NOTE: This config's label is hard-coded in //build/config/BUILDCONFIG.gn
+# so don't rename it without updating that file too!
+config("static-pie-config") {
+  cflags = [
+    "-fno-sanitize=all",
+    "-fno-stack-protector",
+  ]
+  ldflags = [
+    "-nostdlib",
+    "-Wl,-no-dynamic-linker",
+  ]
+}
+
 group("maybe_scudo_default_options") {
   if (scudo_default_options != "" && scudo_default_options != []) {
     public_deps = [ ":scudo_default_options" ]
diff --git a/build/config/fuchsia/zircon_legacy_vars.gni b/build/config/fuchsia/zircon_legacy_vars.gni
deleted file mode 100644
index 0539d07..0000000
--- a/build/config/fuchsia/zircon_legacy_vars.gni
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright 2019 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/config/fuchsia/zircon.gni")
-
-# TODO: Refactor so that only //build/zircon/*_template.gn needs these.
-
-# See //zircon/public/sysroot/BUILD.gn and //build/config/fuchsia/BUILD.gn.
-zircon_legacy_sysroot =
-    read_file("$zircon_root_build_dir/legacy_sysroot-$target_cpu.json", "json")
diff --git a/build/config/sanitizers/BUILD.gn b/build/config/sanitizers/BUILD.gn
index a39407a..55d6064 100644
--- a/build/config/sanitizers/BUILD.gn
+++ b/build/config/sanitizers/BUILD.gn
@@ -9,6 +9,12 @@
 
 variant("asan") {
   common_flags = [ "-fsanitize=address" ]
+  cflags = [
+    # See https://fxbug.dev/66129 and
+    # https://github.com/google/sanitizers/issues/1017.
+    "-mllvm",
+    "-asan-use-private-alias=1",
+  ]
 
   # TODO(phosek): use runtime.json instead of invoking Clang.
   if (is_fuchsia) {
@@ -72,9 +78,48 @@
 # marked that way, the asan_default_options default above will be removed.
 sanitizer_extra_options("suppress-lsan.DO-NOT-USE-THIS") {
   visibility = [
-    # TODO(fxbug.dev/45047): Constrain this and give each a TODO(nnnnn) comment.
-    # Then rename without the scare caps.
-    "*",
+    "//garnet/bin/cpuperf/tests/*",
+    "//garnet/bin/setui/*",
+    "//garnet/bin/trace/*",
+    "//garnet/bin/trace/tests/*",
+    "//src/connectivity/bluetooth/profiles/bt-a2dp-sink/*",
+    "//src/connectivity/bluetooth/profiles/bt-a2dp/*",
+    "//src/connectivity/weave/*",
+    "//src/developer/debug/debug_agent/*",
+    "//src/developer/memory/monitor/tests/*",
+    "//src/developer/shell/mirror/*",
+    "//src/devices/sysmem/tests/sysmem/fuzz/*",
+    "//src/devices/usb/drivers/xhci-rewrite/*",
+    "//src/fonts/font_info/*",
+    "//src/graphics/drivers/msd-intel-gen/tests/unit_tests/*",
+    "//src/graphics/lib/compute/*",
+    "//src/graphics/lib/compute/mold/*",
+    "//src/graphics/lib/compute/surpass/*",
+    "//src/graphics/tests/benchmark/*",
+    "//src/graphics/tests/vkext/*",
+    "//src/graphics/tests/vkloop/*",
+    "//src/lib/cmx/*",
+    "//src/lib/scoped_task/*",
+    "//src/lib/vulkan/tests/*",
+    "//src/lib/zircon/rust/*",
+    "//src/media/audio/drivers/intel-hda/controller/*",
+    "//src/media/playback/mediaplayer/*",
+    "//src/ui/examples/escher/test/*",
+    "//src/ui/input/drivers/hid-input-report/*",
+    "//src/ui/lib/escher/test/*",
+    "//src/ui/scenic/lib/flatland/buffers/tests/*",
+    "//src/ui/scenic/lib/flatland/renderer/tests/*",
+    "//src/ui/scenic/lib/input/tests/*",
+    "//src/ui/scenic/lib/input/tests/fuzzers/*",
+    "//third_party/crashpad/*",
+    "//third_party/mesa/src/intel/vulkan/tests/*",
+    "//third_party/openssh-portable/*",
+    "//third_party/rust_crates/*",
+    "//vendor/*",
+    "//zircon/system/ulib/elf-search/test/*",
+    "//zircon/system/ulib/fit/test/*",
+    "//zircon/system/ulib/lazy_init/test/*",
+    "//zircon/third_party/rapidjson/*",
   ]
 
   args = [ "detect_leaks=0" ]
diff --git a/build/config/sanitizers/OWNERS b/build/config/sanitizers/OWNERS
new file mode 100644
index 0000000..878a9e7
--- /dev/null
+++ b/build/config/sanitizers/OWNERS
@@ -0,0 +1,8 @@
+leonardchan@google.com
+dnordstrom@google.com
+gulfem@google.com
+phosek@google.com
+mcgrathr@google.com
+shayba@google.com
+
+# COMPONENT: Toolchain
diff --git a/build/config/sysroot.gni b/build/config/sysroot.gni
index 5c6305f..117385d 100644
--- a/build/config/sysroot.gni
+++ b/build/config/sysroot.gni
@@ -10,8 +10,8 @@
 if (current_os == target_os && target_sysroot != "") {
   sysroot = target_sysroot
 } else if (is_fuchsia) {
-  sysroot = get_label_info("//build/config/fuchsia:compiler_sysroot",
-                           "target_gen_dir")
+  import("//zircon/public/sysroot/cpp.gni")
+  sysroot = cpp_sysroot_dir
 } else if (is_linux) {
   sysroot = "//prebuilt/third_party/sysroot/linux"
 } else if (is_mac) {
diff --git a/build/config/zircon/BUILD.gn b/build/config/zircon/BUILD.gn
index a5d1dd0..6ca36ea 100644
--- a/build/config/zircon/BUILD.gn
+++ b/build/config/zircon/BUILD.gn
@@ -784,22 +784,10 @@
     # Disable UBSan for this target temporarily until it is migrated into CI/CQ.
     "$zx/system/ulib/zxtest:*",
 
-    # TODO(fxbug.dev/42496): UBSan has found an instance of undefined behavior in this target.
-    # Disable UBSan for this target temporarily until it is migrated into CI/CQ.
-    "$zx_build/system/utest/fidl-simple:*",
-
-    # TODO(fxbug.dev/41897): UBSan has found an instance of undefined behavior in this target.
-    # Disable UBSan for this target temporarily until it is migrated into CI/CQ.
-    "$zx_build/system/utest/fidl:*",
-
     # TODO(fxbug.dev/41900): UBSan has found an instance of undefined behavior in this target.
     # Disable UBSan for this target temporarily until it is migrated into CI/CQ.
     "$zx_build/system/utest/trace:*",
 
-    # TODO(fxbug.dev/41663): UBSan has found an instance of undefined behavior in this target.
-    # Disable UBSan for this target temporarily until it is migrated into CI/CQ.
-    "$zx_build/third_party/lib/acpica:*",
-
     # TODO(fxbug.dev/41901): UBSan has found an instance of undefined behavior in this target.
     # Disable UBSan for this target temporarily until it is migrated into CI/CQ.
     "$zx_build/third_party/uapp/dash:*",
@@ -816,25 +804,9 @@
     # Disable UBSan for this target temporarily until it is migrated into CI/CQ.
     "$zx/third_party/ulib/musl/src/stdio:*",
 
-    # TODO(fxbug.dev/41903): UBSan has found an instance of undefined behavior in this target.
-    # Disable UBSan for this target temporarily until it is migrated into CI/CQ.
-    "$zx/third_party/ulib/musl/third_party/math:*",
-
     # TODO(fxbug.dev/41904): UBSan has found an instance of undefined behavior in this target.
     # Disable UBSan for this target temporarily until it is migrated into CI/CQ.
     "$zx_build/third_party/ulib/boringssl:*",
-
-    # TODO(fxbug.dev/43658): UBSan has found an instance of undefined behavior in this target.
-    # Disable UBSan for this target temporarily until it is migrated into CI/CQ.
-    "$zx_build/system/dev/block/sdmcc:*",
-
-    # TODO(fxbug.dev/46944): UBSan has found an instance of undefined behavior in this target.
-    # Disable UBSan for this target temporarily until it is migrated into CI/CQ.
-    "$zx_build/system/ulib/perftest:*",
-
-    # TODO(fxbug.dev/60442): UBSan has found an instance of undefined behavior in this target.
-    # Disable UBSan for this target temporarily until is is migrated into CI/CQ.
-    "$zx/tools/fidl:*",
   ]
 
   if (!is_gcc) {
@@ -918,3 +890,13 @@
 
   configs = [ "//build/config/zircon:no_exceptions" ]
 }
+
+# Statically linked position-independent executable.
+config("static-pie") {
+  cflags = [ "-fPIE" ]
+  defines = [ "ZX_STATIC_PIE=1" ]
+  ldflags = [
+    "-Wl,-pie",
+    "-Wl,--no-dynamic-linker",
+  ]
+}
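
A rough sketch of how an executable might opt into static-PIE linking using
the configs touched by this change (the target is hypothetical, and real
static-PIE targets may need additional configs removed or added):

    executable("static_pie_example") {
      sources = [ "main.c" ]
      configs += [
        "//build/config/zircon:static-pie",
        "//build/config/fuchsia:static-pie-config",
      ]
    }

With "//build/config/fuchsia:static-pie-config" applied, BUILDCONFIG.gn skips
the implicit crt1_deps dependency, as shown in the change above.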
diff --git a/build/config/zircon/rodso.ld b/build/config/zircon/rodso.ld
new file mode 100644
index 0000000..7b5e92f
--- /dev/null
+++ b/build/config/zircon/rodso.ld
@@ -0,0 +1,136 @@
+/* Copyright 2016 The Fuchsia Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/*
+ * This is a linker script for producing a DSO (shared library) image
+ * that is entirely read-only and trivial to map in without using a
+ * proper ELF loader.  It has two segments: read-only starting at the
+ * beginning of the file, and executable code page-aligned and marked
+ * by the (hidden) symbols CODE_START and CODE_END.
+ *
+ * Ideally this could be accomplished without an explicit linker
+ * script.  The linker would need an option to make the .dynamic
+ * section (aka PT_DYNAMIC segment) read-only rather than read-write;
+ * in fact that could be the default for Zircon/Fuchsia or for
+ * anything using a dynamic linker like musl's that doesn't try to
+ * write into the .dynamic section at runtime (for -shared that is;
+ * for -pie and dynamically-linked executables there is the DT_DEBUG
+ * question).  The linker would need a second option to entirely
+ * segregate code from rodata (and from non-loaded parts of the file),
+ * and page-align the code segment (and pad the end to a page
+ * boundary); in fact that could be the default for any system that
+ * wants to minimize what can go into pages mapped with execute
+ * permission, which is a worthwhile trade-off of security mitigation
+ * over tiny amounts of wasted space in the ELF file.  Beyond that,
+ * the linker should not generate the .got* or .plt* sections at all
+ * when there are no relocs being generated, but today's linkers still
+ * do; since some of those sections are writable, they cause the
+ * creation of a writable PT_LOAD segment by normal linker logic.
+ */
+
+SECTIONS {
+    . = 0 + SIZEOF_HEADERS;
+
+    /*
+     * This should be defined automatically by the linker.
+     * But LLD fails to do so in the presence of a linker script.
+     * So define it explicitly.
+     * TODO(mcgrathr): If http://bugs.llvm.org/show_bug.cgi?id=32367
+     * is ever fixed, remove this.
+     */
+    PROVIDE_HIDDEN(__ehdr_start = . - SIZEOF_HEADERS);
+
+    /*
+     * Match the non-allocated Gold version note specially, so
+     * it doesn't go into the allocated .note section below.
+     * With BFD ld, the .note clause could use:
+     *     INPUT_SECTION_FLAGS(SHF_ALLOC) *(.note*)
+     * so as not to match any non-allocated note sections generically.
+     * But gold and lld do not support the INPUT_SECTION_FLAGS keyword.
+     */
+    .note.gnu.gold-version : { *(.note.gnu.gold-version) }
+
+    .note : {
+        *(.note*)
+    } :rodata :note
+    .dynamic : {
+        *(.dynamic)
+    } :rodata :dynamic
+    .hash : {
+        *(.hash)
+    } :rodata
+    .gnu.hash : { *(.gnu.hash) }
+    .dynsym : { *(.dynsym) }
+    .dynstr : { *(.dynstr) }
+
+    .rodata : {
+        *(.rodata .rodata.* .gnu.linkonce.r.*)
+    } :rodata
+    .rodata1 : { *(.rodata1) }
+    .eh_frame_hdr : {
+        *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*)
+    } :rodata :eh_frame_hdr
+    .eh_frame : {
+        KEEP(*(.eh_frame))
+        *(.eh_frame.*)
+    } :rodata
+    .gcc_except_table : { *(.gcc_except_table*) }
+    .gnu_extab : { *(.gnu_extab*) }
+
+    /*
+     * We'd like to discard these linker-generated sections with /DISCARD/
+     * (or convince the linker not to generate them at all).
+     * But the linker doesn't know how to do that.
+     */
+    .got : { *(.got*) }
+    .plt : { *(.plt*) }
+
+    /*
+     * This section will only exist if there were some dynamic relocation
+     * sections generated by the linker.  If this happens, the code is
+     * broken (it uses PC-sensitive static initializers or suchlike).
+     */
+    .norelocs : { *(.rel*) }
+    ASSERT(SIZEOF(.norelocs) == 0,
+           "rodso code must avoid dynamic relocations!")
+
+    /*
+     * Likewise, this will only exist if there was some writable data.
+     */
+    .nodata : { *(.data*) *(.sdata*) *(.bss*) *(.sbss*) }
+    ASSERT(SIZEOF(.nodata) == 0,
+           "rodso code must avoid writable data sections!")
+
+    . = ALIGN(CONSTANT(MAXPAGESIZE));
+
+    .text : {
+        *(.text.unlikely .text.*_unlikely .text.unlikely.*)
+        *(.text.exit .text.exit.*)
+        *(.text.startup .text.startup.*)
+        *(.text.hot .text.hot.*)
+        *(.text .stub .text.* .gnu.linkonce.t.*)
+        *(.init .init.* .fini .fini.*)
+        *(.gnu.warning)
+        *(.glue_7t) *(.glue_7) *(.vfp11_veneer) *(.v4_bx)
+
+        PROVIDE_HIDDEN(_end = .);
+
+        /*
+         * Pad out the code segment to a page boundary, so that there
+         * is only nop or zero padding visible in the memory image
+         * rather than seeing non-loaded portions of the ELF file
+         * (.shstrtab, section headers, .symtab if not stripped, etc.).
+         */
+        . = ALIGN(CONSTANT(MAXPAGESIZE));
+    } :code
+}
+
+PHDRS {
+    rodata PT_LOAD FLAGS(4) FILEHDR PHDRS;
+    code PT_LOAD FLAGS(5);
+    dynamic PT_DYNAMIC FLAGS(4);
+    note PT_NOTE;
+    eh_frame_hdr PT_GNU_EH_FRAME;
+}
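
A minimal sketch of attaching a linker script like this one through GN (the
config label is hypothetical; this diff does not show how rodso.ld is actually
consumed):

    config("rodso_linker_script") {
      _script = "//build/config/zircon/rodso.ld"
      ldflags = [ "-Wl,-T," + rebase_path(_script, root_build_dir) ]

      # Re-link dependents when the script itself changes.
      inputs = [ _script ]
    }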
diff --git a/build/cpp/sdk_shared_library.gni b/build/cpp/sdk_shared_library.gni
index 591b3b6..b77d9ad 100644
--- a/build/cpp/sdk_shared_library.gni
+++ b/build/cpp/sdk_shared_library.gni
@@ -342,7 +342,7 @@
       runtime_deps_file,
     ]
 
-    stamp_file = "$target_gen_dir/$target_name.stamp"
+    stamp_file = "$target_gen_dir/$target_name.gn_stamp"
 
     outputs = [ stamp_file ]
 
diff --git a/build/cpp/sdk_static_library.gni b/build/cpp/sdk_static_library.gni
index 4ec8bae..340ced7 100644
--- a/build/cpp/sdk_static_library.gni
+++ b/build/cpp/sdk_static_library.gni
@@ -286,7 +286,7 @@
       runtime_deps_file,
     ]
 
-    stamp_file = "$target_gen_dir/$target_name.stamp"
+    stamp_file = "$target_gen_dir/$target_name.gn_stamp"
 
     outputs = [ stamp_file ]
 
diff --git a/build/dart/BUILD.gn b/build/dart/BUILD.gn
index 0e299bd..d562fce 100644
--- a/build/dart/BUILD.gn
+++ b/build/dart/BUILD.gn
@@ -2,6 +2,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import("//build/dart/fidl_move.gni")
 import("//build/dart/toolchain.gni")
 import("//build/toolchain/basic_toolchain.gni")
 import("//build/toolchain/concurrent_jobs.gni")
@@ -45,7 +46,8 @@
     "//topaz/public/dart/fuchsia_inspect/test/integration:dart-inspect-vmo-test-writer",
     "//topaz/public/dart/fuchsia_inspect/test/validator_puppet:dart-inspect-validator-puppet",
     "//topaz/public/dart/fuchsia_modular/examples/fibonacci_agent:fibonacci-agent",
-    "//topaz/public/dart/fuchsia_services/test_support:fuchsia-services-foo-test-server",
+    get_label_info(dart_package_label.fuchsia_services, "dir") +
+        "/test_support:fuchsia-services-foo-test-server",
     "//topaz/tests/dart-inspect-benchmarks:dart-inspect-benchmarks",
   ]
 
@@ -61,15 +63,13 @@
 }
 
 # Copies the patched SDK files for the dart runner
-# NOTE: copies to an _nnbd directory to allow for soft transition
 copy("platform_files") {
   sources = [
     "//prebuilt/third_party/flutter/$target_cpu/release/aot/dart_runner_patched_sdk/platform_strong.dill",
     "//prebuilt/third_party/flutter/$target_cpu/release/aot/dart_runner_patched_sdk/platform_strong.dill.d",
     "//prebuilt/third_party/flutter/$target_cpu/release/aot/dart_runner_patched_sdk/vm_outline_strong.dill",
   ]
-  outputs =
-      [ "$root_out_dir/dart_runner_patched_sdk_nnbd/{{source_file_part}}" ]
+  outputs = [ "$root_out_dir/dart_runner_patched_sdk/{{source_file_part}}" ]
 }
 
 if (current_toolchain == dart_toolchain) {
diff --git a/build/dart/OWNERS b/build/dart/OWNERS
index 7d9d356..24177ef 100644
--- a/build/dart/OWNERS
+++ b/build/dart/OWNERS
@@ -1,6 +1,7 @@
+include /src/dart/OWNERS
+
 pylaligand@google.com
 dworsham@google.com
-chaselatta@google.com
 
 per-file fidl_dart.gni = file: /src/fidl/OWNERS
 per-file fidlmerge_dart.gni = file: /src/fidl/OWNERS
diff --git a/build/dart/config.gni b/build/dart/config.gni
index ca54131..bdb2512 100644
--- a/build/dart/config.gni
+++ b/build/dart/config.gni
@@ -4,13 +4,10 @@
 
 import("//build/dart/dart_build_config.gni")
 
-# Temporary import to allow for soft transition
-import("//topaz/runtime/dart/config.gni")
 declare_args() {
   # Forces all Dart apps to build in product mode which is a
   # stripped down version of the VM running in AOT mode.
-  # TODO uncomment this once references to //topaz/runtime/dart/config.gni are converted
-  #dart_force_product = false
+  dart_force_product = false
 
   # TODO(fxbug.dev/64153) renable aot builds
   #  if (dart_force_product) {
diff --git a/build/dart/dart_build_config.gni b/build/dart/dart_build_config.gni
index f844987..ebf4831 100644
--- a/build/dart/dart_build_config.gni
+++ b/build/dart/dart_build_config.gni
@@ -6,7 +6,7 @@
 # launch the vm service in the runner.
 dart_debug_build_cfg = {
   runtime_meta = "//build/dart/meta/jit_runtime"
-  runner_dep = "//topaz/runtime/dart_runner:dart_jit_runner"
+  runner_dep = "//src/dart:dart_jit_runner"
   platform_name = "dart_runner"
   is_aot = false
   is_product = false
@@ -19,7 +19,7 @@
 # profile aot runner is built without asserts.
 dart_aot_debug_build_cfg = {
   runtime_meta = "//build/dart/meta/aot_runtime"
-  runner_dep = "//topaz/runtime/dart_runner:dart_aot_runner"
+  runner_dep = "//src/dart:dart_aot_runner"
   platform_name = "dart_runner"
   is_aot = true
   is_product = false
@@ -30,7 +30,7 @@
 # launch the vm service in the runner.
 dart_profile_build_cfg = {
   runtime_meta = "//build/dart/meta/aot_runtime"
-  runner_dep = "//topaz/runtime/dart_runner:dart_aot_runner"
+  runner_dep = "//src/dart:dart_aot_runner"
   platform_name = "dart_runner"
   is_aot = true
   is_product = false
@@ -41,7 +41,7 @@
 # not launch the vm service in the runner.
 dart_release_build_cfg = {
   runtime_meta = "//build/dart/meta/aot_product_runtime"
-  runner_dep = "//topaz/runtime/dart_runner:dart_aot_product_runner"
+  runner_dep = "//src/dart:dart_aot_product_runner"
   platform_name = "dart_runner"
   is_aot = true
   is_product = true
diff --git a/build/dart/fidl_dart.gni b/build/dart/fidl_dart.gni
index 873a3f5..1075ca5 100644
--- a/build/dart/fidl_dart.gni
+++ b/build/dart/fidl_dart.gni
@@ -26,10 +26,11 @@
   generation_target_name = "${target_name}_dart_generate"
 
   library_name = target_name
+  root_dir = "$target_gen_dir/${library_name}_package"
   if (defined(invoker.name)) {
     library_name = invoker.name
+    root_dir = "$target_gen_dir/${target_name}_${library_name}_package"
   }
-  root_dir = "$target_gen_dir/${library_name}_package"
   bindings_dir = "$root_dir/lib"
   async_bindings_file = "$bindings_dir/fidl_async.dart"
   test_bindings_file = "$bindings_dir/fidl_test.dart"
diff --git a/build/dart/fidl_move.gni b/build/dart/fidl_move.gni
index 76062ae..9d42cad 100644
--- a/build/dart/fidl_move.gni
+++ b/build/dart/fidl_move.gni
@@ -8,7 +8,9 @@
 dart_package_label = {
   fidl = "//sdk/dart/fidl/"
   fuchsia = "//sdk/dart/fuchsia/"
-  fuchsia_vfs = "//topaz/public/dart/fuchsia_vfs/"
+  fuchsia_scenic = "//topaz/public/dart/fuchsia_scenic"
+  fuchsia_services = "//topaz/public/dart/fuchsia_services/"
+  fuchsia_vfs = "//sdk/dart/fuchsia_vfs/"
   zircon = "//sdk/dart/zircon/"
 }
 
diff --git a/build/dart/kernel/dart_kernel.gni b/build/dart/kernel/dart_kernel.gni
index 6e05c65..6506588 100644
--- a/build/dart/kernel/dart_kernel.gni
+++ b/build/dart/kernel/dart_kernel.gni
@@ -71,12 +71,19 @@
 #  packages_path (required)
 #    Path to the package_config.json file.
 #
-#  main_dart (required)
-#    Path to Dart source file containing main(). This is relative the source_dir
-#    and should exist in the main_package.
+#  main_dart (required, mutually exclusive)
+#    Path to Dart source file containing main(). Mutually exclusive with
+#    main_dart_file. This is relative to source_dir, should exist in the
+#    main_package, and uses a package: URI.
 #
-#  main_package (required)
-#    The name of the package which contains main.
+#  main_package (required, mutually exclusive)
+#    The name of the package which contains main. Mutually exclusive with
+#    main_dart_file.
+#
+#  main_dart_file (required, mutually exclusive)
+#    Path to Dart source file containing main(). Mutually exclusive with
+#    main_dart. This doesn't necessarily need to exist in main_package and uses
+#    a fuchsia-source: URI.
 #
 #  product (required)
 #    Whether this should be built with the product runner.
@@ -109,8 +116,10 @@
   assert(defined(invoker.platform_name), "dart_kernel() requires platform_name")
   assert(defined(invoker.packages_path),
          "dart_kernel() requires the path to the package config")
-  assert(defined(invoker.main_dart), "dart_kernel() requires main_dart")
-  assert(defined(invoker.main_package), "dart_kernel() requires main_package")
+  assert(
+      (defined(invoker.main_dart) && defined(invoker.main_package)) !=
+          defined(invoker.main_dart_file),
+      "dart_kernel() requires either (main_dart and main_package) or main_dart_file")
   assert(defined(invoker.product), "dart_kernel() requires product")
   assert(defined(invoker.is_aot), "dart_kernel() requires is_aot")
 
@@ -179,10 +188,10 @@
 
     if (invoker.platform_name == flutter_platform_name) {
       _kernel_deps += [ "//build/flutter:platform_files" ]
-      _platform_path = "$root_out_dir/flutter_runner_patched_sdk_nnbd"
+      _platform_path = "$root_out_dir/flutter_runner_patched_sdk"
     } else if (invoker.platform_name == dart_platform_name) {
       _kernel_deps += [ "//build/dart:platform_files" ]
-      _platform_path = "$root_out_dir/dart_runner_patched_sdk_nnbd"
+      _platform_path = "$root_out_dir/dart_runner_patched_sdk"
     } else {
       assert(false,
              "platform_name must be either dart_runner or flutter_runner")
@@ -264,7 +273,12 @@
       }
     }
 
-    args += [ "package:${invoker.main_package}/${invoker.main_dart}" ]
+    if (defined(invoker.main_dart)) {
+      args += [ "package:${invoker.main_package}/${invoker.main_dart}" ]
+    } else {
+      rebased_main_dart = rebase_path(invoker.main_dart_file, "//")
+      args += [ "$_multi_root_scheme:///$rebased_main_dart" ]
+    }
 
     deps = _kernel_deps
   }
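A minimal usage sketch of the two mutually exclusive entry-point forms documented above; all target names, package names, and paths here are hypothetical, and the remaining parameters follow the asserts in the template:

# Entry point resolved through a package: URI.
dart_kernel("hello_kernel") {
  platform_name = "dart_runner"
  packages_path = "$target_gen_dir/package_config.json"
  product = false
  is_aot = false
  main_package = "hello"
  main_dart = "main.dart"
}

# Entry point resolved through the fuchsia-source: multi-root scheme,
# e.g. for a generated wrapper that lives outside the main package.
dart_kernel("wrapper_kernel") {
  platform_name = "dart_runner"
  packages_path = "$target_gen_dir/package_config.json"
  product = false
  is_aot = false
  main_dart_file = "$target_gen_dir/generated_main.dart"
}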
diff --git a/build/dev.gni b/build/dev.gni
index 218a411..981e44d 100644
--- a/build/dev.gni
+++ b/build/dev.gni
@@ -12,4 +12,12 @@
   # List of labels for targets that should be built but not included in any
   # build outputs that are part of the build API (e.g. zbi's, package servers).
   dev_build_only_deps = []
+
+  # An optional golden file for fuchsia.zbi kernel cmdline args. If specified,
+  # this golden file will be compared against the fuchsia.zbi kernel cmdline at
+  # build time.
+  dev_fuchsia_zbi_kernel_cmdline_golden = ""
+
+  # List of labels for objects to include in the ZBI.
+  dev_bootfs_labels = []
 }
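A sketch of how these developer arguments might be set in args.gn; the golden file and the label below are hypothetical:

# In $root_build_dir/args.gn:
dev_fuchsia_zbi_kernel_cmdline_golden = "//local/fuchsia_zbi_kernel_cmdline.golden"
dev_bootfs_labels = [ "//local/my_debug_tool:bootfs" ]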
diff --git a/build/dist/distribution_manifest.gni b/build/dist/distribution_manifest.gni
index 80188bd..f5c1f5a 100644
--- a/build/dist/distribution_manifest.gni
+++ b/build/dist/distribution_manifest.gni
@@ -125,3 +125,37 @@
     }
   }
 }
+
+# Adds distribution entries from a given FINI manifest file.
+#
+# Use this template to ensure that all entries from a given input FINI
+# manifest are collected through distribution_manifest() or fini_manifest()
+# when this target is part of their dependency tree.
+#
+# Parameters
+#
+#   file (required)
+#     Path to a FINI file.
+#
+#   deps
+#   testonly
+#   visibility
+template("distribution_entries_file") {
+  assert(defined(invoker.file), "Must specify file")
+  group(target_name) {
+    forward_variables_from(invoker,
+                           [
+                             "deps",
+                             "testonly",
+                             "visibility",
+                           ])
+    metadata = {
+      distribution_entries_files = [
+        {
+          file = rebase_path(invoker.file, root_build_dir)
+          label = get_label_info(":$target_name", "label_with_toolchain")
+        },
+      ]
+    }
+  }
+}
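A minimal usage sketch for the template above; the FINI manifest path and the dependency that produces it are hypothetical:

distribution_entries_file("my_prebuilt_entries") {
  file = "$target_gen_dir/my_prebuilt.fini"
  deps = [ ":generate_my_prebuilt_fini" ]
}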
diff --git a/build/flutter/BUILD.gn b/build/flutter/BUILD.gn
index ae2df95..b4720fd 100644
--- a/build/flutter/BUILD.gn
+++ b/build/flutter/BUILD.gn
@@ -21,9 +21,9 @@
     "//src/experiences/benchmarks/bin/gamma_flutter:gamma-flutter",
     "//src/experiences/benchmarks/bin/scroll_flutter:scroll-flutter",
     "//src/experiences/session_shells/ermine/shell:ermine",
-    "//src/ui/tests/e2e_flutter_tests/embedder/child-view:child-view",
-    "//src/ui/tests/e2e_flutter_tests/embedder/parent-view:parent-view",
-    "//src/ui/tests/e2e_input_tests/touch/one-flutter:one-flutter",
+    "//src/ui/tests/integration_flutter_tests/embedder/child-view:child-view",
+    "//src/ui/tests/integration_flutter_tests/embedder/parent-view:parent-view",
+    "//src/ui/tests/integration_input_tests/touch/one-flutter:one-flutter",
     "//src/tests/intl/timestamp-server-dart:timestamp-server-flutter",
     "//topaz/public/dart/fuchsia_inspect/examples/inspect_mod:inspect-mod",
     "//topaz/public/dart/fuchsia_inspect/test/inspect_flutter_integration_tester:inspect-flutter-integration-tester",
@@ -47,13 +47,11 @@
 }
 
 # Copies the patched SDK files for the flutter runner
-# NOTE: copies to an _nnbd directory to allow for soft transition
 copy("platform_files") {
   sources = [
     "//prebuilt/third_party/flutter/$target_cpu/release/aot/flutter_runner_patched_sdk/platform_strong.dill",
     "//prebuilt/third_party/flutter/$target_cpu/release/aot/flutter_runner_patched_sdk/platform_strong.dill.d",
     "//prebuilt/third_party/flutter/$target_cpu/release/aot/flutter_runner_patched_sdk/vm_outline_strong.dill",
   ]
-  outputs =
-      [ "$root_out_dir/flutter_runner_patched_sdk_nnbd/{{source_file_part}}" ]
+  outputs = [ "$root_out_dir/flutter_runner_patched_sdk/{{source_file_part}}" ]
 }
diff --git a/build/flutter/OWNERS b/build/flutter/OWNERS
index 455b3ba..6fdb23c 100644
--- a/build/flutter/OWNERS
+++ b/build/flutter/OWNERS
@@ -1,4 +1,5 @@
-chaselatta@google.com
+include /src/dart/OWNERS
+
 kaushikiska@google.com
 pylaligand@google.com
 zra@google.com
diff --git a/build/flutter/flutter_build_config.gni b/build/flutter/flutter_build_config.gni
index 985cfb1..e5354fd 100644
--- a/build/flutter/flutter_build_config.gni
+++ b/build/flutter/flutter_build_config.gni
@@ -10,7 +10,7 @@
 # launch the vm service in the runner.
 flutter_debug_build_cfg = {
   runtime_meta = "//build/flutter/meta/jit_runtime"
-  runner_dep = "//topaz/runtime/flutter_runner:flutter_jit_runner"
+  runner_dep = "//src/flutter:flutter_jit_runner"
   platform_name = "flutter_runner"
   is_aot = false
   is_product = false
@@ -23,7 +23,7 @@
 # profile aot runner is built without asserts.
 flutter_aot_debug_build_cfg = {
   runtime_meta = "//build/flutter/meta/aot_runtime"
-  runner_dep = "//topaz/runtime/flutter_runner:flutter_aot_runner"
+  runner_dep = "//src/flutter:flutter_aot_runner"
   platform_name = "flutter_runner"
   is_aot = true
   is_product = false
@@ -34,7 +34,7 @@
 # launch the vm service in the runner.
 flutter_profile_build_cfg = {
   runtime_meta = "//build/flutter/meta/aot_runtime"  # profile runner
-  runner_dep = "//topaz/runtime/flutter_runner:flutter_aot_runner"
+  runner_dep = "//src/flutter:flutter_aot_runner"
   platform_name = "flutter_runner"
   is_aot = true
   is_product = false
@@ -45,7 +45,7 @@
 # not launch the vm service in the runner.
 flutter_release_build_cfg = {
   runtime_meta = "//build/flutter/meta/aot_product_runtime"
-  runner_dep = "//topaz/runtime/flutter_runner:flutter_aot_product_runner"
+  runner_dep = "//src/flutter:flutter_aot_product_runner"
   platform_name = "flutter_runner"
   is_aot = true
   is_product = true
diff --git a/build/flutter/flutter_component.gni b/build/flutter/flutter_component.gni
index 96420e5..0c9564d 100644
--- a/build/flutter/flutter_component.gni
+++ b/build/flutter/flutter_component.gni
@@ -123,11 +123,8 @@
   }
 
   _flutter_driver_extendable = false
-  if (_build_cfg.is_aot) {
-    not_needed(invoker, [ "flutter_driver_extendable" ])
-  } else if (defined(invoker.flutter_driver_extendable)) {
-    # flutter driver is only available to JIT builds
-    _flutter_driver_extendable = invoker.flutter_driver_extendable && is_debug
+  if (defined(invoker.flutter_driver_extendable)) {
+    _flutter_driver_extendable = invoker.flutter_driver_extendable
   }
 
   if (_flutter_driver_extendable) {
@@ -163,8 +160,10 @@
                                "visibility",
                              ])
 
-      deps = [ "//third_party/dart-pkg/git/flutter/packages/flutter_driver" ] +
-             _component_deps
+      deps = [
+               "//third_party/dart-pkg/git/flutter/packages/flutter",
+               "//third_party/dart-pkg/git/flutter/packages/flutter_driver",
+             ] + _component_deps
 
       package_root = _generated_package_root
 
diff --git a/build/flutter/gen_debug_wrapper_main.py b/build/flutter/gen_debug_wrapper_main.py
index 230b38e..8de4fe6 100755
--- a/build/flutter/gen_debug_wrapper_main.py
+++ b/build/flutter/gen_debug_wrapper_main.py
@@ -36,40 +36,48 @@
 import 'dart:async';
 
 import 'package:flutter_driver/driver_extension.dart';
+import 'package:flutter/services.dart';
 ''')
     outfile.write(
         "import 'package:%s/%s' as flutter_app_main;\n" %
         (args.main_package, args.main))
     outfile.write(
         '''
-void main() async {
-  assert((()  {
+void main(List<String> args) async {
+  // TODO(awdavies): Use the logger instead.
+  print('Overriding app main method because flutter_driver_extendable '
+      'is enabled in the build file');
+
+  try {
+    // Enables Flutter Driver VM service extension
+    //
+    // This extension is required for tests that use package:flutter_driver
+    // to drive applications from a separate process.
+    final handler = OptionalMethodChannel('flutter_driver/handler');
+    enableFlutterDriverExtension(handler: (String data) async {
+      return handler.invokeMethod(data);
+    });
+
     // TODO(awdavies): Use the logger instead.
-    print('Overriding app main method because flutter_driver_extendable '
-        'is enabled in the build file');
+    print('flutter driver extensions enabled.');
+    //ignore: avoid_catches_without_on_clauses
+  } catch (e) {
+    // TODO(awdavies): Use the logger instead.
+    // Noop.
+    print('flutter driver extensions not enabled. $e');
+  }
 
-    try {
-      // Enables Flutter Driver VM service extension
-      //
-      // This extension is required for tests that use package:flutter_driver
-      // to drive applications from a separate process.
-      enableFlutterDriverExtension();
-
-      // TODO(awdavies): Use the logger instead.
-      print('flutter driver extensions enabled.');
-      //ignore: avoid_catches_without_on_clauses
-    } catch (e) {
-      // TODO(awdavies): Use the logger instead.
-      // Noop.
-      print('flutter driver extensions not enabled. $e');
-    }
-    // Always return true so that the assert succeeds.
-    return true;
-  }()));
   // Execute the main method of the app under test
-  var res = (flutter_app_main.main as dynamic)();
-  if (res != null && res is Future) {
-    await res;
+  try {
+    var res = (flutter_app_main.main as dynamic)(args);
+    if (res != null && res is Future) {
+      await res;
+    }
+  } on NoSuchMethodError catch(_) {
+    var res = (flutter_app_main.main as dynamic)();
+    if (res != null && res is Future) {
+      await res;
+    }
   }
 }
 ''')
diff --git a/build/flutter/internal/build_test_bootstrap/BUILD.gn b/build/flutter/internal/build_test_bootstrap/BUILD.gn
new file mode 100644
index 0000000..7454c5e
--- /dev/null
+++ b/build/flutter/internal/build_test_bootstrap/BUILD.gn
@@ -0,0 +1,15 @@
+# Copyright 2020 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/dart/dart_tool.gni")
+
+dart_tool("build_test_bootstrap") {
+  main_dart = "main.dart"
+
+  deps = [
+    "//third_party/dart-pkg/git/flutter/packages/flutter_tools",
+    "//third_party/dart-pkg/pub/args",
+    "//third_party/dart-pkg/pub/crypto",
+  ]
+}
diff --git a/build/flutter/internal/build_test_bootstrap/analysis_options.yaml b/build/flutter/internal/build_test_bootstrap/analysis_options.yaml
new file mode 100644
index 0000000..1315200
--- /dev/null
+++ b/build/flutter/internal/build_test_bootstrap/analysis_options.yaml
@@ -0,0 +1,10 @@
+# Copyright 2018 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+include: ../../../../topaz/tools/analysis_options.yaml
+analyzer:
+  errors:
+    implementation_imports: ignore
+    cascade_invocations: ignore
+    prefer_conditional_assignment: ignore
diff --git a/build/flutter/internal/build_test_bootstrap/main.dart b/build/flutter/internal/build_test_bootstrap/main.dart
new file mode 100644
index 0000000..dc8ecf2
--- /dev/null
+++ b/build/flutter/internal/build_test_bootstrap/main.dart
@@ -0,0 +1,41 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import 'dart:async';
+import 'dart:io';
+
+import 'package:args/args.dart';
+import 'package:flutter_tools/src/test/flutter_platform.dart' as loader;
+
+const String _testNameKey = 'test-name';
+const String _outputKey = 'output';
+
+/// Builds the Flutter test wrapper that gets executed by the test harness.
+Future<Null> main(List<String> args) async {
+  ArgParser parser = ArgParser();
+  parser.addOption(
+    _testNameKey,
+    valueHelp: 'filename',
+    help: 'Basename of the test script file being wrapped.',
+  );
+  parser.addOption(
+    _outputKey,
+    valueHelp: 'path',
+    help: 'Path to the output file that this tool should generate.',
+  );
+  ArgResults results = parser.parse(args);
+
+  if (!results.wasParsed(_outputKey) || !results.wasParsed(_testNameKey)) {
+    stderr.writeln(parser.usage);
+    exit(1);
+  }
+
+  String content = loader.generateTestBootstrap(
+    testUrl: Uri.parse(results[_testNameKey]),
+    host: InternetAddress.loopbackIPv4,
+  );
+
+  File outputFile = File(results[_outputKey]);
+  await outputFile.writeAsString(content);
+}
diff --git a/build/flutter/internal/build_test_bootstrap/pubspec.yaml b/build/flutter/internal/build_test_bootstrap/pubspec.yaml
new file mode 100644
index 0000000..e99ace9
--- /dev/null
+++ b/build/flutter/internal/build_test_bootstrap/pubspec.yaml
@@ -0,0 +1,21 @@
+# Copyright 2018 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+name: kernel_compiler
+version: 0.1.0-alpha.1
+description: Builds platform and incremental kernel files for dart_runner
+homepage: https://fuchsia.googlesource.com/topaz/+/HEAD/runtime/dart_runner/
+author: Dart Team <misc@dartlang.org>
+
+dependencies:
+  front_end: any
+  kernel: any
+  vm: any
+
+dependency_overrides:
+  front_end:
+    path: ../../../third_party/dart/pkg/front_end/
+  kernel:
+    path: ../../../third_party/dart/pkg/kernel/
+  vm:
+    path: ../../../third_party/dart/pkg/vm/
diff --git a/build/flutter/internal/flutter_dart_component.gni b/build/flutter/internal/flutter_dart_component.gni
index d757191..562f0ed 100644
--- a/build/flutter/internal/flutter_dart_component.gni
+++ b/build/flutter/internal/flutter_dart_component.gni
@@ -7,7 +7,7 @@
 import("//build/dart/dart_package_config.gni")
 import("//build/dart/kernel/dart_kernel.gni")
 import("//src/sys/build/components.gni")
-import("//tools/cmc/build/cmx.gni")
+import("//tools/cmc/build/cmc.gni")
 
 # Creates a flutter asset manifest from the pubspec.yaml file.
 #
@@ -150,7 +150,7 @@
   # on compilation modes
   _merged_target_name = "${target_name}_merged.cmx"
 
-  cmx_merge(_merged_target_name) {
+  cmc_merge(_merged_target_name) {
     forward_variables_from(invoker,
                            [
                              "testonly",
diff --git a/build/flutter/internal/gen_flutter_test_bundle_invocation.py b/build/flutter/internal/gen_flutter_test_bundle_invocation.py
new file mode 100755
index 0000000..2a5db61
--- /dev/null
+++ b/build/flutter/internal/gen_flutter_test_bundle_invocation.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+# Copyright 2018 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import os
+import stat
+import string
+import sys
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description=
+        'Generate a script that invokes multiple flutter_test targets')
+    parser.add_argument(
+        '--wd',
+        help=
+        'Path to the working directory, relative to that of the invocation file',
+        required=True)
+    parser.add_argument(
+        '--out', help='Path to the invocation file to generate', required=True)
+    parser.add_argument(
+        '--test',
+        action='append',
+        help=
+        'Adds a target to the list of test executables, relative to the working directory',
+        required=True)
+    args = parser.parse_args()
+
+    test_file = args.out
+    test_dir = os.path.dirname(test_file)
+    if not os.path.exists(test_dir):
+        os.makedirs(test_dir)
+
+    script = '''#!/bin/bash
+
+# DO NOT EDIT
+# This script is generated by:
+#   //build/flutter/internal/gen_flutter_test_bundle_invocation.py
+# See: //build/flutter/test.gni
+
+'''
+    script += 'cd "$(dirname $0)/%s"\n' % args.wd
+    for test_executable in args.test:
+        script += '%s "$@"\n' % test_executable
+
+    with open(test_file, 'w') as file:
+        file.write(script)
+    permissions = (
+        stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP |
+        stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH)
+    os.chmod(test_file, permissions)
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/build/flutter/internal/gen_flutter_test_invocation.py b/build/flutter/internal/gen_flutter_test_invocation.py
new file mode 100755
index 0000000..fafc2e4
--- /dev/null
+++ b/build/flutter/internal/gen_flutter_test_invocation.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+# Copyright 2017 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import os
+import stat
+import string
+import sys
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description='Generate a script that invokes the Dart tester')
+    parser.add_argument(
+        '--wd',
+        help=
+        'Path to the working directory, relative to that of the invocation file',
+        required=True)
+    parser.add_argument(
+        '--out', help='Path to the invocation file to generate', required=True)
+    parser.add_argument(
+        '--dart',
+        help='Path to the dart binary, relative to the working directory',
+        required=True)
+    parser.add_argument(
+        '--snapshot',
+        help=
+        'Path to snapshot of the test runner, relative to the working directory',
+        required=True)
+    parser.add_argument(
+        '--sdk-root',
+        help='Path to the SDK platform files, relative to the working directory',
+        required=True)
+    parser.add_argument(
+        '--tests',
+        help=
+        'Path to test-to-precompiled-kernel file list, relative to the working directory',
+        required=True)
+    parser.add_argument(
+        '--dot-packages',
+        help='Path to the .packages file, relative to the working directory',
+        required=True)
+    parser.add_argument(
+        '--flutter-shell',
+        help='Path to the Flutter shell, relative to the working directory',
+        required=True)
+    parser.add_argument(
+        '--icudtl',
+        help='Path to the ICU data file, relative to the working directory',
+        required=True)
+    args = parser.parse_args()
+
+    test_file = args.out
+    test_path = os.path.dirname(test_file)
+    if not os.path.exists(test_path):
+        os.makedirs(test_path)
+
+    script_template = string.Template(
+        '''#!/bin/bash
+# DO NOT EDIT
+# This script is generated by:
+#   //build/flutter/internal/gen_flutter_test_invocation.py
+# See: //build/flutter/test.gni
+
+cd "$$(dirname $$0)/$wd"
+
+$dart $snapshot \\
+  --packages=$dot_packages \\
+  --shell=$flutter_shell \\
+  --tests=$tests \\
+  --sdk-root=$sdk_root \\
+  --icudtl=$icudtl \\
+  "$$@"
+''')
+    with open(test_file, 'w') as file:
+        file.write(script_template.substitute(args.__dict__))
+    permissions = (
+        stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP |
+        stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH)
+    os.chmod(test_file, permissions)
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/build/flutter/test.gni b/build/flutter/test.gni
new file mode 100644
index 0000000..21720aa
--- /dev/null
+++ b/build/flutter/test.gni
@@ -0,0 +1,386 @@
+# Copyright 2019 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/dart/dart_library.gni")
+import("//build/dart/dart_package_config.gni")
+import("//build/dart/kernel/dart_kernel.gni")
+import("//build/dart/toolchain.gni")
+import("//build/testing/test_spec.gni")
+
+# Defines a flutter test suite
+#
+# Parameters
+#
+#   sources (required)
+#     The list of test files, which must be within source_dir.
+#
+#   source_dir (optional)
+#     Directory containing the test sources. Defaults to "test".
+#     Note: this cannot be set to ".".
+#
+#   deps (optional)
+#     List of labels for Dart libraries this suite depends on.
+#
+#   disable_analysis (optional)
+#     Prevents analysis from being run on this target.
+#
+# Example of usage:
+#
+#   flutter_test("baz_test") {
+#     sources = [ "baz_test.dart" ]
+#
+#     deps = [
+#       "//foo/baz",
+#       "//third_party/dart-pkg/pub/test",
+#     ]
+#   }
+if (current_toolchain == dart_toolchain) {
+  template("flutter_test2") {
+    assert(defined(invoker.sources),
+           "flutter_test() requires 'sources' be defined")
+    if (defined(invoker.source_dir)) {
+      assert(invoker.source_dir != ".",
+             "Cannot set source_dir to '.' because it breaks code coverage.")
+    }
+
+    _main_target_name = target_name
+    _library_target_name = "${target_name}_library"
+    _copy_target_name = "${target_name}_copy"
+    _snapshot_target_name = "${target_name}_snapshot"
+
+    _source_dir = "test"
+    if (defined(invoker.source_dir)) {
+      _source_dir = invoker.source_dir
+    }
+
+    _dart_deps = []
+    if (defined(invoker.deps)) {
+      _dart_deps += invoker.deps
+    }
+
+    dart_library(_library_target_name) {
+      forward_variables_from(invoker, [ "disable_analysis" ])
+
+      package_name = _main_target_name
+
+      # We want to mimic the package_root being in place of the source_dir. Dart
+      # does not allow multiple packages to share the same package_root, so we
+      # do this so our /test directories can live alongside our /lib directories,
+      # which is how most Dart packages are structured in our tree.
+      package_root = rebase_path(_source_dir, ".")
+      source_dir = "."
+
+      sources = invoker.sources
+
+      deps = _dart_deps
+
+      # We include the pubspec and analysis options files which are at the
+      # original package root since we are effectively changing the package_root
+      # for this library.
+      pubspec = "pubspec.yaml"
+
+      if (!defined(disable_analysis) ||
+          (defined(disable_analysis) && !disable_analysis)) {
+        # analysis is not disabled so include the options file
+        options_file = "analysis_options.yaml"
+      }
+    }
+
+    _packages_file = "$target_gen_dir/${target_name}_package_config.json"
+    _dart_deps += [
+      ":$_library_target_name",
+      "//third_party/dart-pkg/git/flutter/packages/flutter_test",
+      "//third_party/dart-pkg/git/flutter/packages/flutter_tools",
+    ]
+    _package_config_target_name = "${target_name}_package_config"
+
+    dart_package_config(_package_config_target_name) {
+      deps = _dart_deps
+      outputs = [ _packages_file ]
+    }
+
+    # The binary does not depend on the runtime mode; using debug is fine.
+    _flutter_tester_label = "//src/flutter:flutter_tester"
+    _flutter_tester_gen_dir =
+        get_label_info(_flutter_tester_label, "target_gen_dir")
+    _flutter_tester_bin =
+        rebase_path("${_flutter_tester_gen_dir}/flutter_tester")
+
+    _precompiled_kernel_target_names = []
+    _tests_json = []
+    _tests_filename = "$target_gen_dir/tests.json"
+
+    _test_runtime_deps = [ _tests_filename ]
+
+    foreach(_source_file, invoker.sources) {
+      _source_path = "$_source_dir/$_source_file"
+      _trimmed_source = string_replace(_source_file, "_test.dart", "")
+      if (_source_file != _trimmed_source) {
+        _trimmed_source = string_replace(_trimmed_source, ".", "_")
+        _trimmed_source = string_replace(_trimmed_source, "/", "_")
+        _test_target_name = "${_main_target_name}_${_trimmed_source}"
+        _kernel_target_name = "${_test_target_name}_dill"
+        _bootstrap_target_name = "${_test_target_name}_bootstrap"
+        _pubspec_target_name = "${_test_target_name}_pubspec"
+
+        _bootstrap_filename = "$target_gen_dir/${_bootstrap_target_name}.dart"
+        _dill_filename = "$target_gen_dir/${_kernel_target_name}_kernel.dil"
+
+        action(_bootstrap_target_name) {
+          script = "$root_out_dir/dart-tools/build_test_bootstrap"
+          outputs = [ _bootstrap_filename ]
+
+          rebased_source = rebase_path(_source_path, target_gen_dir)
+          args = [
+            "--output",
+            rebase_path(_bootstrap_filename),
+            "--test-name",
+            "$rebased_source",
+          ]
+
+          deps = [ "//build/flutter/internal/build_test_bootstrap" ]
+        }
+
+        # Dart requires each package to have a unique package_root. This
+        # will copy the pubspec into a unique directory for each source,
+        # allowing us to set that directory as the package_root.
+        copy(_pubspec_target_name) {
+          sources = [ "pubspec.yaml" ]
+          outputs = [ "${target_gen_dir}/${_test_target_name}/pubspec.yaml" ]
+        }
+
+        dart_kernel2(_kernel_target_name) {
+          platform_name = "flutter_runner"
+          packages_path = _packages_file
+          main_dart_file = _bootstrap_filename
+
+          product = false
+          is_aot = false
+
+          # By default dart_kernel() does not link the current platform.dill, but
+          # when running host tests this would fail because the SDK patched for
+          # Fuchsia would be used.
+          link_platform = true
+
+          deps = [
+            ":$_bootstrap_target_name",
+            ":$_library_target_name",
+            ":$_package_config_target_name",
+            ":$_pubspec_target_name",
+            "//third_party/dart-pkg/git/flutter/packages/flutter_test",
+            "//third_party/dart-pkg/pub/clock",
+            "//third_party/dart-pkg/pub/fake_async",
+            "//third_party/dart-pkg/pub/stack_trace",
+            "//third_party/dart-pkg/pub/stream_channel",
+            "//third_party/dart-pkg/pub/test",
+            "//third_party/dart-pkg/pub/test_api",
+          ]
+        }
+
+        _precompiled_kernel_target_names += [ ":${_kernel_target_name}_kernel" ]
+
+        _tests_json += [
+          {
+            source = rebase_path(_bootstrap_filename, root_build_dir)
+            dill = rebase_path(_dill_filename, root_build_dir)
+          },
+        ]
+        _test_runtime_deps += [
+          _bootstrap_filename,
+          _dill_filename,
+        ]
+      }
+    }
+
+    write_file(_tests_filename, _tests_json, "json")
+
+    # Copies resources into the build directory so that they may be archived
+    # with the test and the rest of the test's dependencies; archiving is
+    # done with respect to the build directory.
+    _data_dir = "$target_gen_dir/${_main_target_name}_data"
+    _icudtl_file = "$_data_dir/icudtl.dat"
+    _dart_binary = "$_data_dir/dart"
+    copy(_copy_target_name) {
+      sources = [
+        "//prebuilt/third_party/dart/$host_os-$host_cpu/bin/dart",
+        "//prebuilt/third_party/flutter/$host_cpu/deps/icudtl.dat",
+      ]
+      outputs = [ "$_data_dir/{{source_file_part}}" ]
+    }
+
+    # Creates a snapshot file of the fuchsia tester, which allows the test to
+    # be invoked hermetically.
+    _snapshot = "$target_gen_dir/${_main_target_name}.snapshot"
+    _flutter_tools_label = "//third_party/dart-pkg/git/flutter/packages/flutter_tools:flutter_tools"
+    _main_file = "//third_party/dart-pkg/git/flutter/packages/flutter_tools/bin/fuchsia_tester.dart"
+
+    action(_snapshot_target_name) {
+      depfile = "${_snapshot}.d"
+
+      outputs = [ _snapshot ]
+
+      script = _dart_binary
+
+      # The snapshot path needs to be rebased on top of the root build dir so
+      # that the resulting depfile gets properly formatted.
+      _rebased_snapshot = rebase_path(_snapshot, root_build_dir)
+      _rebased_depfile = rebase_path(depfile)
+      _rebased_packages_path = rebase_path(_packages_file)
+
+      args = [
+        "--snapshot=$_rebased_snapshot",
+        "--snapshot-depfile=$_rebased_depfile",
+        "--packages=$_rebased_packages_path",
+        rebase_path(_main_file),
+      ]
+
+      deps = dart_sdk_deps + [
+               "$_flutter_tools_label",
+               ":$_copy_target_name",
+               ":$_package_config_target_name",
+             ]
+    }
+
+    _invocation_file = "$target_gen_dir/$target_name"
+
+    # _invocation_params encapsulates the parameters to pass to the
+    # invocation-generating action below. The utility lies in being able to
+    # construct the action's args and metadata at the same time.
+    _invocation_params = [
+      {
+        flag = "--wd"
+
+        # TODO(crbug.com/gn/56): Rebasing root_build_dir alone yields a path
+        # component that leaves root_build_dir, preventing portability.
+        path = "$root_build_dir/dummy/.."
+        base = get_path_info(_invocation_file, "dir")
+      },
+      {
+        flag = "--out"
+        path = _invocation_file
+        base = ""  # Will result in an absolute path.
+      },
+      {
+        flag = "--dart"
+        path = _dart_binary
+        base = root_build_dir
+      },
+      {
+        flag = "--snapshot"
+        path = _snapshot
+        base = root_build_dir
+      },
+      {
+        flag = "--tests"
+        path = _tests_filename
+        base = root_build_dir
+      },
+      {
+        flag = "--dot-packages"
+        path = _packages_file
+        base = root_build_dir
+      },
+      {
+        flag = "--flutter-shell"
+        path = _flutter_tester_bin
+        base = root_build_dir
+      },
+      {
+        flag = "--icudtl"
+        path = _icudtl_file
+        base = root_build_dir
+      },
+      {
+        flag = "--sdk-root"
+        path = "$root_out_dir/flutter_runner_patched_sdk"
+        base = root_build_dir
+      },
+    ]
+
+    action(_main_target_name) {
+      script = "//build/flutter/internal/gen_flutter_test_invocation.py"
+      testonly = true
+      outputs = [ _invocation_file ]
+
+      inputs = [
+        _packages_file,
+        _bootstrap_filename,
+        _flutter_tester_bin,
+        _tests_filename,
+      ]
+
+      args = []
+      foreach(param, _invocation_params) {
+        args += [
+          param.flag,
+          rebase_path(param.path, param.base),
+        ]
+        if (param.flag != "--wd") {
+          _test_runtime_deps += [ param.path ]
+        }
+      }
+
+      deps = [
+               ":$_library_target_name",
+               ":$_bootstrap_target_name",
+               ":${_kernel_target_name}_kernel",
+               ":$_snapshot_target_name",
+               ":$_copy_target_name",
+               ":$_package_config_target_name",
+               _flutter_tester_label,
+             ] + _precompiled_kernel_target_names
+
+      metadata = {
+        test_runtime_deps = _test_runtime_deps
+      }
+    }
+  }
+} else {
+  # Not the Dart toolchain.
+  template("flutter_test2") {
+    _main_target_name = target_name
+    _spec_target_name = "${target_name}_spec"
+    _invocation_file = "$target_gen_dir/$target_name"
+
+    if (is_linux || is_mac) {
+      test_spec(_spec_target_name) {
+        target = ":$_main_target_name"
+        path = _invocation_file
+
+        deps = [ ":$_main_target_name($dart_toolchain)" ]
+      }
+    } else {
+      not_needed([ "_spec_target_name" ])
+    }
+
+    action(_main_target_name) {
+      script = "//build/flutter/internal/gen_flutter_test_bundle_invocation.py"
+      testonly = true
+      not_needed(invoker, "*")
+
+      outputs = [ _invocation_file ]
+
+      _dart_target_gen_dir =
+          get_label_info(":bogus($dart_toolchain)", "target_gen_dir")
+      _delegate_file = "$_dart_target_gen_dir/$_main_target_name"
+
+      args = [
+        "--wd",
+
+        # TODO(crbug.com/gn/56): Rebasing root_build_dir alone yields a path
+        # component that leaves root_build_dir, preventing portability.
+        rebase_path("$root_build_dir/dummy/..",
+                    get_path_info(_invocation_file, "dir")),
+        "--out",
+        rebase_path(_invocation_file),
+        "--test",
+        rebase_path(_delegate_file, root_build_dir),
+      ]
+
+      deps = [ ":$_main_target_name($dart_toolchain)" ]
+
+      if (is_linux || is_mac) {
+        data_deps = [ ":$_spec_target_name" ]
+      }
+    }
+  }
+}
diff --git a/build/go/build.py b/build/go/build.py
index f733892..0cfa727 100755
--- a/build/go/build.py
+++ b/build/go/build.py
@@ -219,6 +219,9 @@
         cflags.extend(['-isystem', dir])
     ldflags.extend(['-L' + dir for dir in args.lib_dir])
 
+    if args.sysroot:
+        ldflags.extend(['-L' + args.sysroot + '/lib'])
+
     cflags_joined = ' '.join(cflags)
     ldflags_joined = ' '.join(ldflags)
 
diff --git a/build/go/go_build.gni b/build/go/go_build.gni
index 97718e9..ed5c90c 100644
--- a/build/go/go_build.gni
+++ b/build/go/go_build.gni
@@ -8,6 +8,7 @@
 import("//build/sdk/sdk_host_tool.gni")
 import("//build/toolchain/breakpad.gni")
 import("//build/toolchain/concurrent_jobs.gni")
+import("//zircon/public/sysroot/go.gni")
 
 declare_args() {
   #   gocache_dir
@@ -120,6 +121,9 @@
     target_name = main_target_name
     _variant_shared = false
 
+    if (carchive) {
+    }
+
     forward_variables_from(invoker,
                            [
                              "testonly",
@@ -158,6 +162,13 @@
     godepfile = "//prebuilt/tools/godepfile/${host_platform}/godepfile"
     inputs = [ godepfile ]
 
+    if (is_fuchsia) {
+      # For Fuchsia binaries, use a Go-specific sysroot instead of the
+      # standard one which only contains empty linker stubs (see
+      # comments in //zircon/public/sysroot/BUILD.gn for details).
+      sysroot = go_sysroot_dir
+    }
+
     args = [
       "--godepfile",
       rebase_path(godepfile, "", root_build_dir),
@@ -246,16 +257,36 @@
 
     if (carchive) {
       args += [ "--buildmode=c-archive" ]
+
+      # carchive is only set when building Go fuzzers as static libraries
+      # that are then linked into fuzzer executables. Make sure the library
+      # is built in the same build variant as those executables, to ensure
+      # it grabs the right dependencies.
+      variant_selector_target_type = "fuzzed_executable"
     }
 
     if (is_fuchsia) {
+      # Inject a dependency on libfdio.so. Note that as a special case,
+      # when building fuzzing binaries, this library should be built in
+      # a non-fuzzing variant (because the fuzzing runtime depends on it).
+      # So compute the correct toolchain for it directly here.
+      _fdio_toolchain =
+          string_replace(current_toolchain, "-fuzzer", "") + "-shared"
+      _fdio_label_with_toolchain = "//sdk/lib/fdio($_fdio_toolchain)"
+
       deps += [
-        # TODO(60613) Remove once sysroot generation moves to the Fuchsia build.
-        "//build/unification/lib/sysroot",
-        "//sdk/lib/fdio",
         "//zircon/public/sysroot:go_binary_deps",
+        _fdio_label_with_toolchain,
       ]
 
+      if (_fdio_toolchain != current_toolchain + "-shared") {
+        args += [
+          "--lib-dir",
+          rebase_path(
+              get_label_info(_fdio_label_with_toolchain, "root_out_dir")),
+        ]
+      }
+
       if (!carchive && output_breakpad_syms && host_os != "mac") {
         args += [
           "--dump-syms",
diff --git a/build/go/go_fuzzer.gni b/build/go/go_fuzzer.gni
index cae0275..fbcbcfa 100644
--- a/build/go/go_fuzzer.gni
+++ b/build/go/go_fuzzer.gni
@@ -93,9 +93,7 @@
       disable_syslog_backend = true
     }
 
-    # As noted in go_build.gni, Go libraries do not generate distinct outputs for each variants,
-    # but always use the output of the base toolchain.
-    deps = [ ":${wrapper_name}(${toolchain_variant.base})" ]
+    deps = [ ":$wrapper_name" ]
     base_root_out_dir = get_label_info(deps[0], "root_out_dir")
     base_library_name = get_label_info(deps[0], "name")
     libs = [ "$base_root_out_dir/${base_library_name}.a" ]
diff --git a/build/images/BUILD.gn b/build/images/BUILD.gn
index de41642..0c804a1 100644
--- a/build/images/BUILD.gn
+++ b/build/images/BUILD.gn
@@ -9,7 +9,6 @@
 import("//build/config/clang/clang.gni")
 import("//build/config/fuchsia/zircon.gni")
 import("//build/config/fuchsia/zircon_images.gni")
-import("//build/config/fuchsia/zircon_legacy_vars.gni")
 import("//build/dev.gni")
 import("//build/images/args.gni")
 import("//build/images/boot.gni")
@@ -137,7 +136,7 @@
   testonly = true
   visibility = [ ":*" ]
   deps = [
-    ":component_id_index_config_data",
+    ":component_id_index_config-config-data",
     ":universe_packages",
   ]
 }
@@ -608,6 +607,46 @@
 }
 default_image_deps += [ ":fuchsia" ]
 
+if (dev_fuchsia_zbi_kernel_cmdline_golden != "") {
+  action("fuchsia_zbi_verify") {
+    testonly = true
+
+    script = "//build/zbi/verify_zbi_kernel_cmdline.py"
+    stamp_file = "$target_gen_dir/$target_name.verified"
+    scrutiny_target = "//src/security/scrutiny/bin($host_toolchain)"
+    scrutiny_tool =
+        get_label_info(scrutiny_target, "root_out_dir") + "/scrutiny"
+
+    inputs = [
+      scrutiny_tool,
+      dev_fuchsia_zbi_kernel_cmdline_golden,
+      "$root_out_dir/fuchsia.zbi",
+    ]
+
+    outputs = [ stamp_file ]
+
+    args = [
+      "--zbi-file",
+      rebase_path(inputs[2], root_build_dir),
+      "--kernel-cmdline-golden-file",
+      rebase_path(dev_fuchsia_zbi_kernel_cmdline_golden, root_build_dir),
+      "--scrutiny",
+      rebase_path(scrutiny_tool, root_build_dir),
+      "--fuchsia-dir",
+      rebase_path("//", root_build_dir),
+      "--stamp",
+      rebase_path(stamp_file, root_build_dir),
+    ]
+
+    deps = [
+      ":fuchsia",
+      scrutiny_target,
+    ]
+  }
+
+  default_image_deps += [ ":fuchsia_zbi_verify" ]
+}
+
 if (!use_vboot && custom_signing_script == "") {
   update_manifest += [
     {
@@ -1030,8 +1069,8 @@
   ":blob.blk",
   ":data.blk",
   ":fvm.sparse.blk",
+  "//src/storage/bin/fvm($host_toolchain)",
   "//zircon/tools/blobfs($host_toolchain)",
-  "//zircon/tools/fvm($host_toolchain)",
   "//zircon/tools/minfs($host_toolchain)",
 ]
 filesystem_sizes_inputs += [
@@ -2107,13 +2146,13 @@
 # A component ID index maps component instance IDs to component monikers.
 # Indices are defined using the component_id_index() GN template. They are
 # merged together into a single index and supplied to appmgr using the
-# component_id_index_config_data() template, which produces a config_data().
+# component_id_index_config() template, which produces a config_data().
 #
 # If a system assembly contains components which use isolated storage, then it
-# needs include a component_id_index_config_data().
+# needs to include a component_id_index_config().
 #
 # For more details, see //docs/development/components/component_id_index.md#system-assembly
-component_id_index_config_data("component_id_index_config_data") {
+component_id_index_config("component_id_index_config") {
   testonly = true
 
   # crawl for component_id_index()s in the base set.
@@ -2236,10 +2275,18 @@
   ]
 }
 
+# initialize and prepare the package repository.
+pm_prepare_publish("prepare_publish") {
+  testonly = true
+}
+
 # publish all packages to the package repository.
 pm_publish("publish") {
   testonly = true
-  deps = [ ":all_package_manifests.list" ]
+  deps = [
+    ":all_package_manifests.list",
+    ":prepare_publish",
+  ]
   inputs = [ all_package_manifests_list ]
 }
 
diff --git a/build/images/args.gni b/build/images/args.gni
index 9fd3c2d..7200e55 100644
--- a/build/images/args.gni
+++ b/build/images/args.gni
@@ -108,7 +108,7 @@
   compress_blobs = true
 
   # The format blobfs should store blobs in.
-  blob_layout_format = "padded"
+  blob_layout_format = "compact"
 
   # Build boot images that prefer Zedboot over local boot (only for EFI).
   always_zedboot = false
diff --git a/build/images/assemble_system.gni b/build/images/assemble_system.gni
index 09f2d56..07383d0 100644
--- a/build/images/assemble_system.gni
+++ b/build/images/assemble_system.gni
@@ -110,7 +110,7 @@
     compress_blobs = invoker.compress_blobs
   }
 
-  blob_layout_format = "padded"
+  blob_layout_format = "compact"
   if (defined(invoker.blob_layout_format)) {
     blob_layout_format = invoker.blob_layout_format
   }
diff --git a/build/images/fvm.gni b/build/images/fvm.gni
index dbc1ee6..bc78c15 100644
--- a/build/images/fvm.gni
+++ b/build/images/fvm.gni
@@ -36,12 +36,12 @@
   # selected for conservation of space, rather than performance.
   fvm_slice_size = "8388608"
 
-  # Number of slices reserved by FVM for internal usage. If set, a dummy
-  # partition will be addded to the FVM image, containing this many slices.
-  # If unset, then no reservation partition will be added.
-  # TODO(jfsulliv): Set to 1 by default so that we exercise this feature on all
-  # product configurations.
-  fvm_reserved_slices = ""
+  # Number of slices reserved by FVM for internal usage. A reservation
+  # partition will be added to the FVM image, containing this many slices.
+  # If set to the empty string, then no reservation partition will be added.
+  # Set to 1 by default so that we exercise this feature on all product
+  # configurations.
+  fvm_reserved_slices = "1"
 
   # Users of the filesystem may know required minimum load (total number of
   # data bytes and total number of files) on the fs. Following parameters
@@ -123,7 +123,7 @@
                              "testonly",
                              "visibility",
                            ])
-    tool = "//zircon/tools/fvm"
+    tool = "//src/storage/bin/fvm"
     outputs = [ invoker.output_name ]
     args = rebase_path(outputs, root_build_dir)
     if (defined(invoker.args)) {
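A sketch of overriding the new default in args.gn, for example on a space-constrained product that does not want the reservation partition (illustrative only):

# In args.gn: keep the default slice size but drop the reservation partition.
fvm_slice_size = "8388608"
fvm_reserved_slices = ""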
diff --git a/build/images/guest/BUILD.gn b/build/images/guest/BUILD.gn
index bc7aee3..78ece62 100644
--- a/build/images/guest/BUILD.gn
+++ b/build/images/guest/BUILD.gn
@@ -39,7 +39,7 @@
     "//src/bringup/bin/pwrbtn-monitor:bootfs",
     "//src/bringup/bin/pwrbtn-monitor",
     "//src/bringup/bin/run-vc",
-    "//src/bringup/bin/svchost",
+    "//src/bringup/bin/svchost:bootfs",
     "//src/bringup/bin/sysinfo:bootfs",
     "//src/bringup/bin/sysinfo",
     "//src/bringup/bin/trace-benchmark",
diff --git a/build/images/manifest.gni b/build/images/manifest.gni
index bbc9326..e4c869a 100644
--- a/build/images/manifest.gni
+++ b/build/images/manifest.gni
@@ -18,24 +18,6 @@
   extra_manifest_args = []
 }
 
-# Manifests inherited from Zircon.
-# Paths in these manifest are relative to this build's root directory.
-zircon_aux_manifests = []
-zircon_aux_manifests_deps = []
-
-foreach(flavor, [ "legacy-aux" ]) {
-  _aux_flavor = {
-  }
-  _aux_flavor = {
-    target = "//build/unification/images:$flavor"
-    gen_dir = get_label_info(target, "target_out_dir")
-    name = get_label_info(target, "name")
-    file = "$gen_dir/$name.unification.manifest"
-  }
-  zircon_aux_manifests += [ _aux_flavor.file ]
-  zircon_aux_manifests_deps += [ _aux_flavor.target ]
-}
-
 # Action target that generates a response file in GN's "shlex" format.
 #
 # Parameters
@@ -176,14 +158,10 @@
     # they just supply libraries that might satisfy dependencies.
     sources += [ runtime_deps_manifest ]
     deps += [ ":${runtime_deps_manifest_target}" ]
-    sources += zircon_aux_manifests
-    deps += zircon_aux_manifests_deps
-    foreach(manifest, [ runtime_deps_manifest ] + zircon_aux_manifests) {
-      response_file_contents += [
-        "--cwd=.",
-        "--manifest=" + rebase_path(manifest),
-      ]
-    }
+    response_file_contents += [
+      "--cwd=.",
+      "--manifest=" + rebase_path(runtime_deps_manifest),
+    ]
 
     # Note that after the first '--output' argument, further `--manifest` or
     # `--entry` arguments in invoker.args will contribute to the output manifest.
diff --git a/build/images/recovery/sysmgr-eng-golden.json b/build/images/recovery/sysmgr-eng-golden.json
index ee4eb30..ccda79c 100644
--- a/build/images/recovery/sysmgr-eng-golden.json
+++ b/build/images/recovery/sysmgr-eng-golden.json
@@ -1,7 +1,8 @@
 {
   "apps": [
     "fuchsia-pkg://fuchsia.com/netcfg#meta/netcfg.cmx",
-    "fuchsia-pkg://fuchsia.com/system_recovery#meta/system_recovery.cmx"
+    "fuchsia-pkg://fuchsia.com/system_recovery#meta/system_recovery.cmx",
+    "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx"
   ],
   "services": {
     "fuchsia.net.NameLookup": "fuchsia-pkg://fuchsia.com/dns-resolver#meta/dns-resolver.cmx",
@@ -23,12 +24,12 @@
     "fuchsia.posix.socket.Provider": "fuchsia-pkg://fuchsia.com/netstack#meta/netstack.cmx",
     "fuchsia.stash.SecureStore": "fuchsia-pkg://fuchsia.com/stash#meta/stash_secure.cmx",
     "fuchsia.sysmem.Allocator": "fuchsia-pkg://fuchsia.com/sysmem_connector#meta/sysmem_connector.cmx",
-    "fuchsia.time.Utc": "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx",
     "fuchsia.wlan.device.service.DeviceService": "fuchsia-pkg://fuchsia.com/wlanstack#meta/wlanstack.cmx",
     "fuchsia.wlan.policy.AccessPointListener": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
     "fuchsia.wlan.policy.AccessPointProvider": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
     "fuchsia.wlan.policy.ClientListener": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
     "fuchsia.wlan.policy.ClientProvider": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
+    "fuchsia.wlan.product.deprecatedclient.DeprecatedClient": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
     "fuchsia.wlan.product.deprecatedconfiguration.DeprecatedConfigurator": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
     "fuchsia.wlan.service.Wlan": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx"
   },
@@ -38,8 +39,8 @@
     "fuchsia.wlan.policy.ClientListener",
     "fuchsia.wlan.policy.AccessPointProvider",
     "fuchsia.wlan.policy.AccessPointListener",
-    "fuchsia.wlan.product.deprecatedconfiguration.DeprecatedConfigurator",
-    "fuchsia.time.Utc"
+    "fuchsia.wlan.product.deprecatedclient.DeprecatedClient",
+    "fuchsia.wlan.product.deprecatedconfiguration.DeprecatedConfigurator"
   ],
   "update_dependencies": [
     "fuchsia.posix.socket.Provider",
diff --git a/build/images/recovery/sysmgr-fdr-golden.json b/build/images/recovery/sysmgr-fdr-golden.json
index ee4eb30..ccda79c 100644
--- a/build/images/recovery/sysmgr-fdr-golden.json
+++ b/build/images/recovery/sysmgr-fdr-golden.json
@@ -1,7 +1,8 @@
 {
   "apps": [
     "fuchsia-pkg://fuchsia.com/netcfg#meta/netcfg.cmx",
-    "fuchsia-pkg://fuchsia.com/system_recovery#meta/system_recovery.cmx"
+    "fuchsia-pkg://fuchsia.com/system_recovery#meta/system_recovery.cmx",
+    "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx"
   ],
   "services": {
     "fuchsia.net.NameLookup": "fuchsia-pkg://fuchsia.com/dns-resolver#meta/dns-resolver.cmx",
@@ -23,12 +24,12 @@
     "fuchsia.posix.socket.Provider": "fuchsia-pkg://fuchsia.com/netstack#meta/netstack.cmx",
     "fuchsia.stash.SecureStore": "fuchsia-pkg://fuchsia.com/stash#meta/stash_secure.cmx",
     "fuchsia.sysmem.Allocator": "fuchsia-pkg://fuchsia.com/sysmem_connector#meta/sysmem_connector.cmx",
-    "fuchsia.time.Utc": "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx",
     "fuchsia.wlan.device.service.DeviceService": "fuchsia-pkg://fuchsia.com/wlanstack#meta/wlanstack.cmx",
     "fuchsia.wlan.policy.AccessPointListener": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
     "fuchsia.wlan.policy.AccessPointProvider": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
     "fuchsia.wlan.policy.ClientListener": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
     "fuchsia.wlan.policy.ClientProvider": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
+    "fuchsia.wlan.product.deprecatedclient.DeprecatedClient": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
     "fuchsia.wlan.product.deprecatedconfiguration.DeprecatedConfigurator": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx",
     "fuchsia.wlan.service.Wlan": "fuchsia-pkg://fuchsia.com/wlancfg#meta/wlancfg.cmx"
   },
@@ -38,8 +39,8 @@
     "fuchsia.wlan.policy.ClientListener",
     "fuchsia.wlan.policy.AccessPointProvider",
     "fuchsia.wlan.policy.AccessPointListener",
-    "fuchsia.wlan.product.deprecatedconfiguration.DeprecatedConfigurator",
-    "fuchsia.time.Utc"
+    "fuchsia.wlan.product.deprecatedclient.DeprecatedClient",
+    "fuchsia.wlan.product.deprecatedconfiguration.DeprecatedConfigurator"
   ],
   "update_dependencies": [
     "fuchsia.posix.socket.Provider",
diff --git a/build/input/BUILD.gn b/build/input/BUILD.gn
index 3fc51db..043550f 100644
--- a/build/input/BUILD.gn
+++ b/build/input/BUILD.gn
@@ -6,6 +6,7 @@
   testonly = true
   deps = [
     "board:bootfs_labels",
+    "dev:bootfs_labels",
     "product:bootfs_labels",
   ]
 }
diff --git a/build/input/dev/BUILD.gn b/build/input/dev/BUILD.gn
index 156f9ab..c92e029 100644
--- a/build/input/dev/BUILD.gn
+++ b/build/input/dev/BUILD.gn
@@ -4,6 +4,11 @@
 
 import("//build/dev.gni")
 
+group("bootfs_labels") {
+  testonly = true
+  deps = dev_bootfs_labels
+}
+
 group("build_only_deps") {
   testonly = true
   deps = dev_build_only_deps
diff --git a/build/intl/OWNERS b/build/intl/OWNERS
index b74e2a7..9da9685 100644
--- a/build/intl/OWNERS
+++ b/build/intl/OWNERS
@@ -1,6 +1,3 @@
-fmil@google.com
-kpozin@google.com
-shayba@google.com
-viktard@google.com
+include /src/intl/OWNERS
 
 # Components: I18N
diff --git a/build/package.gni b/build/package.gni
index 69d4a97..e5db7b2 100644
--- a/build/package.gni
+++ b/build/package.gni
@@ -287,6 +287,10 @@
       if (get_path_info(meta.dest, "extension") == "cmx") {
         cmx(manifest_target) {
           manifest = meta.path
+
+          # We don't know which deps are associated with which component manifest.
+          # Disable collecting expected includes to avoid false errors.
+          check_includes = false
           forward_variables_from(pkg,
                                  [
                                    "deps",
@@ -307,6 +311,10 @@
       } else if (get_path_info(meta.path, "extension") == "cml") {
         cm(manifest_target) {
           manifest = meta.path
+
+          # We don't know which deps are associated with which component manifest.
+          # Disable collecting expected includes to avoid false errors.
+          check_includes = false
           forward_variables_from(pkg,
                                  [
                                    "deps",
@@ -630,7 +638,10 @@
       args =
           manifest_args + [ "--entry=meta/package=" +
                             rebase_path(pkg_meta_output, "", target_out_dir) ]
-      deps = pkg.deps + [ ":$pkg_meta_generated" ]
+      deps = pkg.deps + [
+               ":$pkg_meta_generated",
+               "//zircon/public/sysroot:system_libc_deps",
+             ]
       public_deps = pkg.public_deps
     }
     manifest_file = get_target_outputs(":$manifest")
diff --git a/build/prebuilt/BUILD.gn b/build/prebuilt/BUILD.gn
index a94108d..0433f82 100644
--- a/build/prebuilt/BUILD.gn
+++ b/build/prebuilt/BUILD.gn
@@ -7,6 +7,8 @@
 # The //prebuilt directory contains only the binary artifacts managed by
 # jiri.  So this directory represents it in the build.
 
+# We assume that if jiri is tracking at least one CIPD package with attribute "debug-symbols"
+# then //prebuilt/.build-id exists.
 attributes = read_file("//.jiri_root/attributes.json", "json")
 debug_symbols = false
 foreach(attribute, attributes) {
@@ -15,13 +17,37 @@
   }
 }
 
-if (debug_symbols) {
-  prebuilt_binaries("prebuilt") {
-    testonly = true
-    build_id_dir = "//prebuilt/.build-id"
+_script = "//build/prebuilt/dir_exists.py"
+stable_prebuilt_build_id_dir_exists =
+    exec_script(python_exe_src,
+                [
+                  "-S",
+                  rebase_path(_script),
+                  rebase_path("//prebuilt/stable/.build-id"),
+                ],
+                "trim string",
+                [ _script ])
+
+group("prebuilt") {
+  testonly = true
+
+  deps = []
+
+  if (debug_symbols) {
+    deps += [ ":latest_or_generic" ]
   }
-} else {
-  group("prebuilt") {
-    testonly = true
+
+  if (stable_prebuilt_build_id_dir_exists == "True") {
+    deps += [ ":stable" ]
   }
 }
+
+prebuilt_binaries("latest_or_generic") {
+  testonly = true
+  build_id_dir = "//prebuilt/.build-id"
+}
+
+prebuilt_binaries("stable") {
+  testonly = true
+  build_id_dir = "//prebuilt/stable/.build-id"
+}
diff --git a/build/prebuilt/dir_exists.py b/build/prebuilt/dir_exists.py
new file mode 100755
index 0000000..90405a5
--- /dev/null
+++ b/build/prebuilt/dir_exists.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python3.8
+
+# Copyright 2020 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+if __name__ == '__main__':
+  print(os.path.isdir(sys.argv[1]))
diff --git a/build/rust/fidl_rust.gni b/build/rust/fidl_rust.gni
index 5ea6349..45cf013 100644
--- a/build/rust/fidl_rust.gni
+++ b/build/rust/fidl_rust.gni
@@ -6,6 +6,8 @@
 import("//build/fidl/toolchain.gni")
 import("//build/rust/toolchain.gni")
 
+fidl_rust_tools_dir = "//prebuilt/third_party/rust_tools/$host_platform"
+
 # Generates some Rust bindings for a FIDL library.
 #
 # The parameters for this template are defined in //build/fidl/fidl.gni. The
@@ -43,6 +45,8 @@
 
     tool = "//tools/fidl/fidlgen_rust"
 
+    # This action depends on rustfmt and rustfmt.toml, but instead of listing
+    # them here we do so in //tools/fidl/fidlgen_rust/BUILD.gn.
     inputs = [ json_representation ]
 
     outputs = [ filename ]
@@ -53,8 +57,9 @@
       "--output-filename",
       rebase_path(filename, root_build_dir),
       "--rustfmt",
-      rebase_path(
-          "//prebuilt/third_party/rust_tools/$host_platform/bin/rustfmt"),
+      rebase_path("$fidl_rust_tools_dir/bin/rustfmt", root_build_dir),
+      "--rustfmt-config",
+      rebase_path("//rustfmt.toml", root_build_dir),
     ]
 
     deps = [ ":$main_target_name($fidl_toolchain)" ]
diff --git a/build/secondary/third_party/dart-pkg/OWNERS b/build/secondary/third_party/dart-pkg/OWNERS
index e8bff30..f699cbe 100644
--- a/build/secondary/third_party/dart-pkg/OWNERS
+++ b/build/secondary/third_party/dart-pkg/OWNERS
@@ -1,4 +1 @@
-jasoncampbell@google.com
-chaselatta@google.com
-chandarren@google.com
-naudzghebre@google.com
+include /src/dart/OWNERS
diff --git a/build/secondary/third_party/dart-pkg/git/flutter/packages/flutter/BUILD.gn b/build/secondary/third_party/dart-pkg/git/flutter/packages/flutter/BUILD.gn
index 0c42b64..d1e6c7a 100644
--- a/build/secondary/third_party/dart-pkg/git/flutter/packages/flutter/BUILD.gn
+++ b/build/secondary/third_party/dart-pkg/git/flutter/packages/flutter/BUILD.gn
@@ -26,6 +26,6 @@
   ]
 
   if (is_fuchsia) {
-    deps += [ "//topaz/runtime/sky_engine:sky_engine_dart" ]
+    deps += [ "//src/flutter/sky_engine" ]
   }
 }
diff --git a/build/secondary/third_party/dart-pkg/git/flutter/packages/flutter_driver/BUILD.gn b/build/secondary/third_party/dart-pkg/git/flutter/packages/flutter_driver/BUILD.gn
index 5b8ff95..11cd3bf 100644
--- a/build/secondary/third_party/dart-pkg/git/flutter/packages/flutter_driver/BUILD.gn
+++ b/build/secondary/third_party/dart-pkg/git/flutter/packages/flutter_driver/BUILD.gn
@@ -31,6 +31,6 @@
   ]
 
   if (is_fuchsia) {
-    deps += [ "//topaz/runtime/sky_engine:sky_engine_dart" ]
+    deps += [ "//src/flutter/sky_engine" ]
   }
 }
diff --git a/build/testing/environments.gni b/build/testing/environments.gni
index e7704b4..809477b 100644
--- a/build/testing/environments.gni
+++ b/build/testing/environments.gni
@@ -12,6 +12,9 @@
 
   # The list of environment names to include in "basic_envs".
   basic_env_names = [ "emu" ]
+
+  # Adds GCE as a basic env if true.
+  enable_test_on_gce = false
 }
 
 _all_named_envs = []
@@ -133,6 +136,34 @@
   },
 ]
 
+vim3_env = {
+  dimensions = {
+    device_type = "Vim3"
+  }
+}
+_all_named_envs += [
+  {
+    name = "vim3"
+    env = vim3_env
+  },
+]
+
+if (enable_test_on_gce) {
+  basic_env_names += [ "gce" ]
+  gce_env = {
+    dimensions = {
+      device_type = "GCE"
+    }
+  }
+
+  _all_named_envs += [
+    {
+      name = "gce"
+      env = gce_env
+    },
+  ]
+}
+
 ### END Individual test environments
 
 ### Aliases ###
diff --git a/build/testing/platforms.gni b/build/testing/platforms.gni
index 781d0b3..231072f 100644
--- a/build/testing/platforms.gni
+++ b/build/testing/platforms.gni
@@ -98,9 +98,22 @@
     device_type = "Intel NUC Kit NUC7i5DNHE"
   },
 
-  # Experimental/FYI. fxbug.dev/43946
+  # Experimental/FYI.
+  # fxbug.dev/43946
   {
     device_type = "Nelson"
     cpu = "arm64"
   },
+
+  # fxbug.dev/62836
+  {
+    device_type = "Vim3"
+    cpu = "arm64"
+  },
+
+  # fxbug.dev/9127
+  {
+    device_type = "GCE"
+    cpu = "x64"
+  },
 ]
diff --git a/build/testing/test_spec.gni b/build/testing/test_spec.gni
index dd00cc3..37f1a5f 100644
--- a/build/testing/test_spec.gni
+++ b/build/testing/test_spec.gni
@@ -244,6 +244,7 @@
       "AEMU",
       "QEMU",
       "Intel NUC Kit NUC7i5DNHE",
+      "GCE",
     ]
   } else if (board_name == "qemu-arm64") {
     allowed_device_types = [ "QEMU" ]
@@ -260,6 +261,8 @@
     allowed_device_types = [ "Sherlock" ]
   } else if (board_name == "nelson") {
     allowed_device_types = [ "Nelson" ]
+  } else if (board_name == "vim3") {
+    allowed_device_types = [ "Vim3" ]
   }
 
   _environments = []
diff --git a/build/testing/zbi_test.gni b/build/testing/zbi_test.gni
index e5946c8..6190822 100644
--- a/build/testing/zbi_test.gni
+++ b/build/testing/zbi_test.gni
@@ -31,6 +31,12 @@
   input_target = "${target_name}_input"
   zbi_target = "${target_name}.zbi"
 
+  not_needed(invoker,
+             [
+               "qemu_kernel",
+               "timeout",
+             ])
+
   group_deps = []
 
   entries = []
@@ -50,17 +56,29 @@
   }
 
   zbi(zbi_target) {
-    forward_variables_from(invoker,
-                           "*",
-                           [
-                             "args",
-                             "device_types",
-                           ])
     testonly = true
+    data_deps = []
+    forward_variables_from(invoker,
+                           [
+                             "cpu",
+                             "compress",
+                             "data_deps",
+                             "output_dir",
+                             "output_extension",
+                             "output_name",
+                             "tags",
+                             "visibility",
+                           ])
+    if (defined(visibility)) {
+      visibility += [ ":$main_target" ]
+    }
     if (!defined(output_name)) {
       output_name = main_target
     }
-    deps += [ ":$input_target" ]
+    deps = invoker.deps + [ ":$input_target" ]
+    if (defined(invoker.qemu_kernel)) {
+      data_deps += [ invoker.qemu_kernel ]
+    }
   }
 
   group_deps += [ ":$zbi_target" ]
@@ -108,13 +126,21 @@
       zbi_tests = [
         {
           cpu = current_cpu
-          disabled = false
+          disabled = defined(invoker.tags) && invoker.tags + [ "disabled" ] -
+                                              [ "disabled" ] != invoker.tags
           name = main_target
           label = get_label_info(":$target_name", "label_with_toolchain")
           path = rebase_path(output_file, root_build_dir)
           bootserver_netboot = [ "--boot" ]
           success_string = zbi_test_success_string
           device_types = _device_types
+          if (defined(invoker.qemu_kernel)) {
+            assert(device_types == [ "QEMU" ],
+                   "`qemu_kernel` tests can only run on QEMU")
+            qemu_kernel_label =
+                get_label_info(invoker.qemu_kernel, "label_with_toolchain")
+          }
+          forward_variables_from(invoker, [ "timeout" ])
         },
       ]
     }
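A note on the `disabled` expression above: GN lists have no membership operator, so the add-then-subtract idiom is used; list subtraction removes every occurrence of an element, so the round trip only changes the list when "disabled" was already present in invoker.tags. A rough Python equivalent of the same check, for illustration only:

    def gn_disabled(tags):
        # Mirrors: invoker.tags + [ "disabled" ] - [ "disabled" ] != invoker.tags
        added = tags + ["disabled"]
        removed = [t for t in added if t != "disabled"]  # GN '-' drops all occurrences
        return removed != tags

    print(gn_disabled(["bootfs"]))              # False: the zbi test stays enabled
    print(gn_disabled(["bootfs", "disabled"]))  # True: the zbi test is marked disabled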
diff --git a/build/testing/zircon_zbi_tests.gni b/build/testing/zircon_zbi_tests.gni
deleted file mode 100644
index eb12750..0000000
--- a/build/testing/zircon_zbi_tests.gni
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright 2020 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/config/fuchsia/zircon.gni")
-
-zircon_zbi_tests = read_file("$zircon_root_build_dir/zbi_tests.json", "json")
diff --git a/build/toolchain/concurrent_jobs.gni b/build/toolchain/concurrent_jobs.gni
index dc348ad..1ae5a8d 100644
--- a/build/toolchain/concurrent_jobs.gni
+++ b/build/toolchain/concurrent_jobs.gni
@@ -11,17 +11,23 @@
   rebase_path(_script, root_build_dir),
   "--reserve-memory=1GB",
 ]
-if (use_thinlto) {
-  _args += [
-    "--memory-per-job",
-    "local=2GB",
-  ]
-} else if (use_lto) {
-  _args += [
-    "--memory-per-job",
-    "local=4GB",
-  ]
+
+if (use_lto) {
+  # Per the logic in //build/config/lto/config.gni, thinlto only applies
+  # when lto is enabled.
+  if (use_thinlto) {
+    _args += [
+      "--memory-per-job",
+      "local=2GB",
+    ]
+  } else {
+    _args += [
+      "--memory-per-job",
+      "local=4GB",
+    ]
+  }
 } else {
+  # No lto
   _args += [
     "--memory-per-job",
     "local=1GB",
diff --git a/build/toolchain/runtime/toolchain_runtime_deps.gni b/build/toolchain/runtime/toolchain_runtime_deps.gni
index cbb9dc3..c83da43 100644
--- a/build/toolchain/runtime/toolchain_runtime_deps.gni
+++ b/build/toolchain/runtime/toolchain_runtime_deps.gni
@@ -114,6 +114,49 @@
   ]
 }
 
+# Do the same for 'libclang_rt.ubsan_standalone.so', which also depends
+# on libc++abi.so.
+_clang_runtime = []
+_clang_runtime = _new_clang_runtime
+
+_ubsan_runtimes_map = []
+foreach(entry, _clang_runtime) {
+  if (entry.cflags == [ "-fsanitize=undefined" ] && entry.ldflags == []) {
+    _targets = []
+    _targets = entry.target
+    _target = _targets[0]
+    if (string_replace(_target, "-fuchsia", "") != _target) {
+      _ubsan_runtimes_map += [
+        {
+          target = _target
+          runtime = entry.runtime
+        },
+      ]
+    }
+  }
+}
+
+_new_clang_runtime = []
+foreach(entry, _clang_runtime) {
+  _new_clang_runtime += [
+    {
+      forward_variables_from(entry, "*")
+      if (cflags == [ "-fsanitize=undefined" ] &&
+          ldflags == [ "-static-libstdc++" ]) {
+        _target = target[0]
+        if (string_replace(_target, "-fuchsia", "") != _target) {
+          foreach(_ubsan_entry, _ubsan_runtimes_map) {
+            if (_ubsan_entry.target == _target) {
+              runtime = []
+              runtime = _ubsan_entry.runtime
+            }
+          }
+        }
+      }
+    },
+  ]
+}
+
 _clang_runtime = []
 _clang_runtime = _new_clang_runtime
 
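The two foreach passes above amount to a keyed substitution over the runtime manifest: the first pass records, per *-fuchsia target triple, the runtime list of the plain -fsanitize=undefined entry (which also carries libc++abi.so), and the second pass rewrites the runtime of the matching -static-libstdc++ entry to that list. A hedged Python sketch of the same transformation over a list of dicts, using the field names from the GN code:

    def patch_ubsan_runtimes(clang_runtime):
        # Pass 1: map each *-fuchsia target to the runtime of its plain ubsan entry.
        ubsan_runtimes = {}
        for entry in clang_runtime:
            if entry["cflags"] == ["-fsanitize=undefined"] and entry["ldflags"] == []:
                target = entry["target"][0]
                if "-fuchsia" in target:
                    ubsan_runtimes[target] = entry["runtime"]
        # Pass 2: give the -static-libstdc++ ubsan entries that runtime so they
        # also pick up libc++abi.so.
        patched = []
        for entry in clang_runtime:
            entry = dict(entry)
            if (entry["cflags"] == ["-fsanitize=undefined"]
                    and entry["ldflags"] == ["-static-libstdc++"]
                    and entry["target"][0] in ubsan_runtimes):
                entry["runtime"] = ubsan_runtimes[entry["target"][0]]
            patched.append(entry)
        return patched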
diff --git a/build/unification/BUILD.gn b/build/unification/BUILD.gn
index 06004d7..58910fa 100644
--- a/build/unification/BUILD.gn
+++ b/build/unification/BUILD.gn
@@ -5,8 +5,5 @@
 group("unification") {
   testonly = true
 
-  deps = [
-    "images",
-    "zbi",
-  ]
+  deps = [ "zbi" ]
 }
diff --git a/build/unification/images/BUILD.gn b/build/unification/images/BUILD.gn
index 05651ea..9f7ca5e 100644
--- a/build/unification/images/BUILD.gn
+++ b/build/unification/images/BUILD.gn
@@ -39,16 +39,7 @@
 # IMPORTANT NOTE: this target is now frozen, please do not add any more
 # dependencies to it.
 aggregate_manifest("legacy-aux") {
-  deps = [
-    ":lib.c",
-    ":lib.c.asan",
-    ":lib.c.asan-ubsan",
-    ":lib.c.profile",
-    ":lib.c.sancov",
-    ":lib.c.ubsan",
-    ":lib.c.ubsan-sancov",
-    ":lib.c.ubsan-sancov-full",
-  ]
+  deps = []
 
   reference = "legacy-aux-$target_cpu"
 }
diff --git a/build/unification/images/aggregate_manifest.gni b/build/unification/images/aggregate_manifest.gni
deleted file mode 100644
index dec4051..0000000
--- a/build/unification/images/aggregate_manifest.gni
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright 2019 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/config/fuchsia/zircon.gni")
-import("//build/config/fuchsia/zircon_images.gni")
-
-# Merges manifests together to recreate an image manifest generated by the
-# Zircon build.
-#
-# Parameters
-#
-#   deps (required)
-#     List of targets generating a manifest.
-#
-#   reference (optional)
-#     Path to the Zircon image manifest that this target is replicating.
-
-template("aggregate_manifest") {
-  assert(defined(invoker.deps), "Aggregate manifest needs dependencies.")
-
-  manifest_target_name = "$target_name.manifest"
-
-  final_manifest = "$target_out_dir/$target_name.unification.manifest"
-
-  group_deps = []
-
-  # Merges all the dependent manifests together.
-  action(manifest_target_name) {
-    script = "//build/images/manifest.py"
-
-    inputs = []
-
-    outputs = [ final_manifest ]
-
-    response_file_contents = [
-      "--output",
-      rebase_path(outputs[0]),
-      "--unique",
-    ]
-
-    foreach(dep, invoker.deps) {
-      out_dir = get_label_info(dep, "target_out_dir")
-      name = get_label_info(dep, "name")
-      manifest = "$out_dir/$name.unification.manifest"
-      inputs += [ manifest ]
-      response_file_contents += [
-        "--manifest",
-        rebase_path(manifest),
-      ]
-    }
-
-    args = [ "@{{response_file_name}}" ]
-
-    deps = invoker.deps
-  }
-  group_deps += [ ":$manifest_target_name" ]
-
-  if (defined(invoker.reference)) {
-    # Look for the reference manifest in the list of Zircon-supplied images.
-    reference = invoker.reference
-    reference_manifest = false
-    foreach(image, zircon_images) {
-      if (image.type == "manifest") {
-        if (image.name == reference) {
-          assert(reference_manifest == false,
-                 "Duplicate manifest for $reference")
-          reference_manifest = "$zircon_root_build_dir/${image.path}"
-        }
-      }
-    }
-    assert(reference_manifest != false,
-           "Could not find manifest for $reference")
-
-    normalize_target_name = "$target_name.normalize"
-    compare_target_name = "$target_name.compare"
-    normalized_manifest = "$target_gen_dir/$target_name.normalized_ref.txt"
-
-    # The first step is to "rebase" the reference manifest so that its paths
-    # are also relative to the present build's root_build_dir.
-    action(normalize_target_name) {
-      script = "//build/images/manifest.py"
-
-      inputs = [ reference_manifest ]
-
-      outputs = [ normalized_manifest ]
-
-      args = [
-        "--output",
-        rebase_path(outputs[0]),
-        "--unique",
-        "--cwd",
-        rebase_path(zircon_root_build_dir, root_out_dir),
-        "--manifest",
-        rebase_path(inputs[0]),
-      ]
-    }
-
-    # Verify that the generated and reference manifest are identical.
-    action(compare_target_name) {
-      script = "compare_manifests.py"
-
-      inputs = [
-        final_manifest,
-        normalized_manifest,
-      ]
-
-      outputs = [ "$target_out_dir/$target_name.success.stamp" ]
-
-      args = [
-        "--generated",
-        rebase_path(final_manifest),
-        "--reference",
-        rebase_path(normalized_manifest),
-        "--stamp",
-        rebase_path(outputs[0]),
-      ]
-
-      deps = [
-        ":$manifest_target_name",
-        ":$normalize_target_name",
-      ]
-    }
-
-    group_deps += [ ":$compare_target_name" ]
-  }
-
-  group(target_name) {
-    public_deps = group_deps
-  }
-}
diff --git a/build/unification/images/artifacts.gni b/build/unification/images/artifacts.gni
deleted file mode 100644
index 4c50a2e..0000000
--- a/build/unification/images/artifacts.gni
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2019 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/config/fuchsia/zircon.gni")
-
-manifests =
-    read_file("$zircon_root_build_dir/legacy_unification-$target_cpu.json",
-              "json")
-zircon_artifacts = []
-
-foreach(manifest, manifests) {
-  if (manifest.cpu == target_cpu && manifest.os == target_os) {
-    _manifest_file = "$zircon_root_build_dir/${manifest.path}"
-    _lines = []
-    _lines = read_file(_manifest_file, "list lines")
-    _manifest_deps = []
-    _manifest_lines = []
-    foreach(line, _lines) {
-      dep = string_replace(line, "@", "")
-      if (dep != line) {
-        # This is a line of the format "@foobar", which denotes a dependency on
-        # "foobar".
-        _manifest_deps += [ ":$dep" ]
-      } else {
-        _manifest_lines += [ line ]
-      }
-    }
-    zircon_artifacts += [
-      {
-        name = manifest.name
-        deps = _manifest_deps
-        lines = _manifest_lines
-      },
-    ]
-  }
-}
diff --git a/build/unification/images/compare_manifests.py b/build/unification/images/compare_manifests.py
deleted file mode 100755
index e24af3a..0000000
--- a/build/unification/images/compare_manifests.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python3.8
-# Copyright 2019 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import argparse
-import sys
-
-
-def read_contents(manifest):
-    with open(manifest, 'r') as manifest_file:
-        lines = manifest_file.readlines()
-        return dict([l.strip().split('=', 1) for l in lines])
-
-
-def main():
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        '--generated', help='Path to the generated manifest', required=True)
-    parser.add_argument(
-        '--reference', help='Path to the reference manifest', required=True)
-    parser.add_argument('--stamp', help='Path to the stamp file', required=True)
-    args = parser.parse_args()
-
-    items_gen = read_contents(args.generated)
-    items_ref = read_contents(args.reference)
-
-    missing_keys_ref = [k for k in items_gen if k not in items_ref]
-    missing_keys_gen = [k for k in items_ref if k not in items_gen]
-    different_keys = [
-        k for k in items_gen if k in items_ref and items_gen[k] != items_ref[k]
-    ]
-
-    if not missing_keys_gen and not missing_keys_ref and not different_keys:
-        with open(args.stamp, 'w') as stamp_file:
-            stamp_file.write('Comparison successful \o/')
-        return 0
-
-    print('------------------------------------------------------------------')
-    print(
-        'This build step failed because the Zircon and Fuchsia builds are '
-        'out of sync.')
-
-    if missing_keys_gen:
-        print('')
-        print("Items not in generated manifest")
-        for item in sorted(missing_keys_gen):
-            print('- ' + item)
-        print('')
-        print(
-            'For items missing from the generated manifest, augment the '
-            'appropriate target in //build/unification/images/BUILD.gn '
-            'with a dependency on the missing item\'s target. For example, '
-            'if "bin/foobar" is missing, just add a dependency on the '
-            '":bin.foobar" target.')
-        print(
-            'Note that these targets are generated from metadata produced '
-            'by the Zircon build. If the target does not exist, please '
-            'verify that its original version in the Zircon build is '
-            'declared with a target of type "zx_something".')
-
-    if missing_keys_ref:
-        print('')
-        print("Items not in reference manifest")
-        for item in sorted(missing_keys_ref):
-            print('- ' + item)
-        print('')
-        print(
-            'For items not in the reference manifest, inspect the '
-            'dependencies of the failing target in '
-            '//build/unification/images/BUILD.gn and remove the extraneous '
-            'one.')
-
-    if different_keys:
-        print('')
-        print("Items with different paths")
-        for item in different_keys:
-            print('- ' + item)
-            print('   generated: ' + items_gen[item])
-            print('   reference: ' + items_ref[item])
-        print('')
-        print(
-            'If the generated and reference manifests cannot agree on the '
-            'path of a given object, then something is busted in the Zircon '
-            'build. Please see //build/unification/OWNERS for a list of '
-            'folks who can help.')
-
-    print('------------------------------------------------------------------')
-
-    return 1
-
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/build/unification/images/zircon_artifact.gni b/build/unification/images/zircon_artifact.gni
deleted file mode 100644
index 2cb857b..0000000
--- a/build/unification/images/zircon_artifact.gni
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2019 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/config/fuchsia/zircon.gni")
-import("//build/python/python.gni")
-import("aggregate_manifest.gni")
-
-# A binary object (app, driver, test) generated by the Zircon build.
-#
-# The purpose of this type is to exposes a binary built by the ZN build.
-# Traditionally, these are shipped to the GN build in aggregate form, i.e. as
-# part of a much larger .zbi or .manifest file.
-#
-# The binary is represented by a manifest using the BOOTFS format. This allows
-# the GN build to include granular listings of what goes into the various images
-# it produces instead of sharing the work with the ZN build.
-#
-# Once the contents of images are encoded in the GN build, moving a given binary
-# from ZN to GN becomes a matter of moving source code around and verifying that
-# the resulting binary is well-formed.
-#
-# Parameters
-#
-#   contents (required)
-#     A list of strings representing the manifest contents.
-
-template("zircon_artifact") {
-  assert(defined(invoker.contents), "Manifest contents are missing.")
-
-  manifest = "$target_out_dir/$target_name.original.manifest"
-  write_file(manifest, invoker.contents)
-
-  main_target_name = target_name
-  rebase_target_name = "$target_name.rebase"
-
-  action(rebase_target_name) {
-    script = python_exe_src
-
-    inputs = [ manifest ]
-
-    outputs = [ "$target_out_dir/$target_name.unification.manifest" ]
-
-    args = [
-      rebase_path("//build/images/manifest.py", root_build_dir),
-      "--output",
-      rebase_path(outputs[0], root_build_dir),
-      "--unique",
-      "--cwd",
-      rebase_path(zircon_root_build_dir, root_build_dir),
-      "--manifest",
-      rebase_path(inputs[0]),
-    ]
-  }
-
-  aggregate_manifest(main_target_name) {
-    deps = [ ":$rebase_target_name" ]
-    if (defined(invoker.deps)) {
-      deps += invoker.deps
-    }
-  }
-}
diff --git a/build/unification/lib/sysroot/BUILD.gn b/build/unification/lib/sysroot/BUILD.gn
index c5f0bec..111f321 100644
--- a/build/unification/lib/sysroot/BUILD.gn
+++ b/build/unification/lib/sysroot/BUILD.gn
@@ -2,9 +2,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import("//build/unification/lib/zircon_runtime_library.gni")
-
-# The sysroot currently only provides a single library, libc.
-zircon_runtime_library("sysroot") {
-  library = "c"
+# TODO(60613): Remove the //vendor/... targets that depend on this.
+group("sysroot") {
+  deps = [ "//zircon/public/sysroot:system_libc_deps" ]
 }
diff --git a/build/unification/lib/zircon_runtime_library.gni b/build/unification/lib/zircon_runtime_library.gni
deleted file mode 100644
index b300582..0000000
--- a/build/unification/lib/zircon_runtime_library.gni
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2020 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-assert(is_fuchsia, "Zircon libraries only available for Fuchsia targets")
-
-import("//build/config/fuchsia/zircon.gni")
-import("//build/unification/images/artifacts.gni")
-
-# Creates a group with distribution_entries metadata that looks like:
-#
-#   {
-#     destination = "lib/libtest-xxxxx.so"
-#     label = '//build/unification/lib/toolchain:libtest"
-#     source = "../../prebuilt/third_party/..../libtest-xxxxx.so"
-#   }
-#
-# Exposes a library provided by the ZN build to the manifest metadata collection
-# scheme.
-#
-# This template allows such libraries and their various variants to be included
-# in manifests generated from metadata.
-#
-# Parameters
-#
-#   library (required)
-#     [string] Name of the library to expose.
-template("zircon_runtime_library") {
-  assert(defined(invoker.library), "Must provide a library")
-
-  _variant_suffix = ""
-  if (toolchain_variant.instrumented && toolchain_variant.name != "") {
-    # Fuzzers just use the base variant.
-    _variant_suffix +=
-        "." + string_replace(toolchain_variant.name, "-fuzzer", "")
-  }
-  name = "lib.${invoker.library}" + _variant_suffix
-
-  # TODO(tmandry): Remove fallback logic once clang toolchain rolls.
-  fallback_name = false
-  if (defined(invoker.fallback)) {
-    fallback_name = "lib.${invoker.fallback}" + _variant_suffix
-  }
-
-  artifact = false
-  foreach(entry, zircon_artifacts) {
-    if (entry.name == name || entry.name == fallback_name) {
-      assert(artifact == false, "Duplicate Zircon library: $name")
-      artifact = entry
-    }
-  }
-  assert(artifact != false, "Could not find Zircon library: $name")
-  assert(artifact.deps == [],
-         "Cannot handle Zircon library $name, deps not supported")
-
-  script = "//build/unification/lib/format_metadata.py"
-  format_args = [
-    "-S",  # https://bugs.fuchsia.dev/p/fuchsia/issues/detail?id=62771
-    rebase_path(script, root_build_dir),
-    "--zircon-build-dir",
-    rebase_path(zircon_root_build_dir),
-    "--build-dir",
-    rebase_path(root_build_dir),
-
-    # NOTE: not including the toolchain here as this target can be reached from
-    # different paths due to its hacky nature, and label stability is key to
-    # ensure the build does not fail due to a bogus duplicate.
-    # This will go away naturally when these libraries are handled directly in
-    # the GN build.
-    "--label",
-    get_label_info(":$target_name", "label_no_toolchain"),
-  ]
-  foreach(line, artifact.lines) {
-    format_args += [
-      "--entry",
-      line,
-    ]
-  }
-
-  metadata_entries =
-      exec_script(python_exe_src, format_args, "json", [ script ])
-
-  group(target_name) {
-    forward_variables_from(invoker, [ "visibility" ])
-    metadata = {
-      # Used by the distribution_manifest template.
-      distribution_entries_barrier = []
-      distribution_entries = metadata_entries
-    }
-  }
-}
diff --git a/build/unification/zbi/BUILD.gn b/build/unification/zbi/BUILD.gn
index f564204..db9ecc6 100644
--- a/build/unification/zbi/BUILD.gn
+++ b/build/unification/zbi/BUILD.gn
@@ -21,6 +21,7 @@
     "//src/bringup/bin/bootsvc/integration_test:bootsvc-integration-tests",
     "//src/bringup/bin/bootsvc/integration_test:bootsvc-root-job-test",
     "//src/bringup/lib/mexec/tests",
+    "//zircon/kernel/phys:tests",
     "//zircon/system/utest/core:core-tests",
   ]
 
diff --git a/build/zbi/BUILD.gn b/build/zbi/BUILD.gn
new file mode 100644
index 0000000..5b4647b
--- /dev/null
+++ b/build/zbi/BUILD.gn
@@ -0,0 +1,57 @@
+# Copyright 2020 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/python/python.gni")
+import("//build/testing/environments.gni")
+import("//build/testing/host_test.gni")
+import("//build/testing/host_test_data.gni")
+
+scrutiny_target = "//src/security/scrutiny/bin($host_toolchain)"
+scrutiny_tool = get_label_info(scrutiny_target, "root_out_dir") + "/scrutiny"
+zbi_target = "//zircon/tools/zbi($host_toolchain)"
+zbi_tool = get_label_info(zbi_target, "root_out_dir") + "/zbi"
+if (host_os == "win") {
+  zbi_tool += ".exe"
+}
+
+host_test("verify_zbi_kernel_cmdline_test") {
+  binary_path = python_exe_path
+  args = [
+    rebase_path("${target_out_dir}/test_wrapper.py", root_build_dir),
+    rebase_path("${python_exe_path}", root_build_dir),
+    rebase_path("${target_out_dir}/verify_zbi_kernel_cmdline_test.py",
+                root_build_dir),
+    rebase_path(scrutiny_tool, root_build_dir),
+    rebase_path(zbi_tool, root_build_dir),
+  ]
+  deps = [
+    ":tools($host_toolchain)",
+    ":verify_zbi_kernel_cmdline_test_data($host_toolchain)",
+    "//build/python:interpreter",
+    scrutiny_target,
+    zbi_target,
+  ]
+}
+
+host_test_data("verify_zbi_kernel_cmdline_test_data") {
+  sources = [
+    "test_wrapper.py",
+    "verify_zbi_kernel_cmdline.py",
+    "verify_zbi_kernel_cmdline_test.py",
+  ]
+  outputs = [ "${target_out_dir}/{{source_file_part}}" ]
+}
+
+host_test_data("tools") {
+  sources = [
+    scrutiny_tool,
+    zbi_tool,
+  ]
+}
+
+group("tests") {
+  testonly = true
+
+  deps = [ ":verify_zbi_kernel_cmdline_test($host_toolchain)" ]
+}
diff --git a/build/zbi/test_wrapper.py b/build/zbi/test_wrapper.py
new file mode 100644
index 0000000..121de53
--- /dev/null
+++ b/build/zbi/test_wrapper.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3.8
+# Copyright 2020 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""
+A wrapper script for setting up the environment for
+verify_zbi_kernel_cmdline_test.
+
+Usage:
+test_wrapper.py python_exe_path test_script scrutiny_path zbi_path
+"""
+
+import os
+import subprocess
+import sys
+
+
+def main(args):
+    python_path = os.path.abspath(args[0])
+    test_script = os.path.abspath(args[1])
+    os.environ['SCRUTINY'] = os.path.abspath(args[2])
+    os.environ['ZBI'] = os.path.abspath(args[3])
+    dir_path = os.path.dirname(os.path.realpath(__file__))
+    if not os.path.exists(os.environ['SCRUTINY']):
+        print('scrutiny is not found at ' + os.environ['SCRUTINY'])
+    if not os.path.exists(os.environ['ZBI']):
+        print('zbi is not found at ' + os.environ['ZBI'])
+
+    subprocess.check_output(
+        [python_path, '-m', 'unittest', '-v', test_script],
+        env=os.environ,
+        cwd=dir_path)
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv[1:]))
diff --git a/build/zbi/verify_zbi_kernel_cmdline.py b/build/zbi/verify_zbi_kernel_cmdline.py
new file mode 100644
index 0000000..0f12200
--- /dev/null
+++ b/build/zbi/verify_zbi_kernel_cmdline.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python3.8
+# Copyright 2020 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+'''Verifies that the kernel cmdline in a ZBI image matches a golden file.'''
+
+import argparse
+import os
+import subprocess
+import sys
+import tempfile
+
+
+def main(input_args):
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--zbi-file', help='Path to the zbi to verify', required=True)
+    parser.add_argument(
+        '--scrutiny',
+        help='Path to the scrutiny tool used for verifying kernel cmdline',
+        required=True)
+    parser.add_argument(
+        '--fuchsia-dir',
+        help='Path to fuchsia root directory, required for scrutiny to work',
+        required=True)
+    parser.add_argument(
+        '--kernel-cmdline-golden-file',
+        help='Path to the kernel cmdline golden file',
+        required=True)
+    parser.add_argument(
+        '--stamp', help='Path to the victory file', required=True)
+    args = parser.parse_args(input_args)
+
+    if not verify_kernel_cmdline(args.scrutiny, args.zbi_file,
+                                 args.kernel_cmdline_golden_file,
+                                 args.fuchsia_dir):
+        return -1
+    with open(args.stamp, 'w') as stamp_file:
+        stamp_file.write('Golden!\n')
+    return 0
+
+
+def verify_kernel_cmdline(
+        scrutiny_path, zbi_path, kernel_cmdline_golden_file, fuchsia_dir):
+    for file in [scrutiny_path, zbi_path, kernel_cmdline_golden_file,
+                 fuchsia_dir]:
+        if not os.path.exists(file):
+            print('Path ' + file + ' not found')
+            return False
+    with tempfile.TemporaryDirectory() as tmp:
+        try:
+            subprocess.check_call(
+                [
+                    scrutiny_path, '-c',
+                    'tool.zbi.extract --input ' + zbi_path + ' --output ' + tmp
+                ],
+                env={'FUCHSIA_DIR': fuchsia_dir})
+        except subprocess.CalledProcessError as e:
+            print('Error: Failed to run scrutiny: {0}'.format(e))
+            return False
+        with open(os.path.join(tmp, 'sections', 'cmdline.blk'), 'r') as f:
+            # The cmdline.blk contains a trailing \x00.
+            cmdline = f.read().strip().rstrip('\x00')
+        with open(kernel_cmdline_golden_file, 'r') as f:
+            cmdline_golden_file = f.read().strip()
+        return compare_cmdline(
+            cmdline, cmdline_golden_file, kernel_cmdline_golden_file)
+
+
+class CmdlineFormatException(Exception):
+    """Exception thrown when kernel cmdline is in invalid format."""
+
+    def __init__(self):
+        Exception.__init__(self)
+
+
+def compare_cmdline(actual_cmdline, golden_cmdline, golden_file):
+    try:
+        golden_entries = parse_cmdline(golden_cmdline, '\n')
+        actual_entries = parse_cmdline(actual_cmdline, ' ')
+    except CmdlineFormatException:
+        return False
+    golden_cmd = generate_sorted_cmdline(golden_entries)
+    actual_cmd = generate_sorted_cmdline(actual_entries)
+    if golden_cmd != actual_cmd:
+        print('Kernel cmdline mismatch!')
+        print(
+            'Please update kernel cmdline golden file at ' + golden_file +
+            ' to:')
+        print('```')
+        print(actual_cmd)
+        print('```')
+        print()
+        print(
+            'To reproduce this error locally, use ' +
+            '`fx set --args=dev_fuchsia_zbi_kernel_cmdline_golden=\'"' +
+            golden_file.replace('../../', '//') + '"\'`')
+        return False
+    return True
+
+
+def parse_cmdline(cmdline, splitter):
+    cmdline_entries = {}
+    entries = cmdline.split(splitter)
+    for entry in entries:
+        key_value = entry.split('=')
+        if len(key_value) == 1:
+            if key_value[0] != '':
+                cmdline_entries[key_value[0]] = True
+        elif len(key_value) == 2:
+            cmdline_entries[key_value[0]] = key_value[1]
+        else:
+            print('Error: invalid kernel cmdline, key value pair:' + entry)
+            raise CmdlineFormatException
+
+    return cmdline_entries
+
+
+def generate_sorted_cmdline(entries):
+    items = []
+    for key in sorted(entries):
+        if entries[key] is True:
+            items.append(key)
+        else:
+            items.append(key + '=' + entries[key])
+    return '\n'.join(items)
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv[1:]))
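For reference, the comparison above is order-insensitive because both sides are parsed into key/value maps and re-serialized in sorted order; the golden file is newline-separated while the cmdline extracted from the ZBI is space-separated. A quick illustration using the functions defined in this file:

    import verify_zbi_kernel_cmdline as v

    golden = v.parse_cmdline("key1=v1\nkey2=v2\nbootopt", "\n")
    actual = v.parse_cmdline("key2=v2 bootopt key1=v1", " ")
    # Both normalize to the same sorted form, so ordering differences alone
    # never fail the check.
    assert v.generate_sorted_cmdline(golden) == v.generate_sorted_cmdline(actual)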
diff --git a/build/zbi/verify_zbi_kernel_cmdline_test.py b/build/zbi/verify_zbi_kernel_cmdline_test.py
new file mode 100644
index 0000000..fb5c8cf
--- /dev/null
+++ b/build/zbi/verify_zbi_kernel_cmdline_test.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python3.8
+# Copyright 2020 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Unit test for verify_zbi_kernel_cmdline.py.
+
+Requires the SCRUTINY and ZBI environment variables to be set.
+
+To manually run this test:
+
+  SCRUTINY=~/fuchsia/out/default/host_x64/scrutiny \
+  ZBI=~/fuchsia/out/default/host_x64/zbi python3 \
+  verify_zbi_kernel_cmdline_test.py
+"""
+import os
+import sys
+import subprocess
+import tempfile
+import unittest
+import unittest.mock as mock
+
+import verify_zbi_kernel_cmdline
+
+
+def verify_kernel_cmdline(golden, actual):
+    with tempfile.TemporaryDirectory() as test_folder:
+        golden_file = os.path.join(test_folder, 'golden')
+        stamp_file = os.path.join(test_folder, 'stamp')
+        fuchsia_folder = os.path.join(test_folder, 'fuchsia')
+        test_zbi = os.path.join(test_folder, 'test.zbi')
+        cmdline_file = os.path.join(test_folder, 'cmdline')
+        scrutiny = os.environ['SCRUTINY']
+        with open(golden_file, 'w+') as f:
+            f.write(golden)
+        with open(cmdline_file, 'wb+') as f:
+            f.write(actual)
+
+        # Use ZBI to create a test.zbi that only contains cmdline.
+        subprocess.check_call(
+            [os.environ['ZBI'], '-o', test_zbi, '-T', 'CMDLINE', cmdline_file])
+        os.mkdir(fuchsia_folder)
+
+        args = [
+            '--zbi-file', test_zbi, '--scrutiny', scrutiny, '--fuchsia-dir',
+            fuchsia_folder, '--kernel-cmdline-golden-file', golden_file,
+            '--stamp', stamp_file
+        ]
+        # Verify the cmdline in the generated ZBI.
+        return verify_zbi_kernel_cmdline.main(args)
+
+
+class RunVerifyZbiKernelCmdlineTest(unittest.TestCase):
+
+    def test_verify_kernel_cmdline_success_normal_case(self):
+        self.assertEqual(
+            0,
+            verify_kernel_cmdline(
+                'key1=v1\nkey2=v2\nkey3=v3', b'key1=v1 key2=v2 key3=v3'))
+
+    def test_verify_kernel_cmdline_success_order_diff(self):
+        self.assertEqual(
+            0,
+            verify_kernel_cmdline(
+                'key1=v1\nkey2=v2\nkey3=v3', b'key2=v2 key1=v1 key3=v3'))
+
+    def test_verify_kernel_cmdline_success_no_value_option(self):
+        self.assertEqual(
+            0, verify_kernel_cmdline('option1\noption2', b'option1 option2'))
+
+    def test_verify_kernel_cmdline_fail_golden_empty(self):
+        self.assertEqual(
+            -1, verify_kernel_cmdline('', b'key2=v2 key1=v1 key3=v3'))
+
+    def test_verify_kernel_cmdline_fail_missing_key2(self):
+        self.assertEqual(
+            -1, verify_kernel_cmdline('key1=v1\nkey2=v2', b'key1=v1'))
+
+    def test_verify_kernel_cmdline_fail_key1_mismatch(self):
+        self.assertEqual(
+            -1, verify_kernel_cmdline('key1=v1\nkey2=v2', b'key1=v2 key2=v2'))
+
+    def test_verify_kernel_cmdline_fail_key2_mismatch(self):
+        self.assertEqual(
+            -1, verify_kernel_cmdline('key1=v1\nkey2=v2', b'key1=v1 key2=v1'))
+
+    def test_verify_kernel_cmdline_fail_additional_key3(self):
+        self.assertEqual(
+            -1,
+            verify_kernel_cmdline(
+                'key1=v1\nkey2=v2', b'key1=v1 key2=v2 key3=v3'))
+
+    def test_verify_kernel_cmdline_fail_invalid_format(self):
+        self.assertEqual(
+            -1, verify_kernel_cmdline('key1=v1\nkey2=v2', b'invalid=format=1'))
+
+    def test_verify_kernel_cmdline_fail_option1_missing(self):
+        self.assertEqual(
+            -1, verify_kernel_cmdline('option1\noption2', b'option2'))
+
+    def test_verify_kernel_cmdline_fail_additional_option3(self):
+        self.assertEqual(
+            -1,
+            verify_kernel_cmdline(
+                'option1\noption2', b'option1 option2 option3'))
+
+    def test_verify_kernel_cmdline_zbi_not_found(self):
+        with tempfile.TemporaryDirectory() as test_folder:
+            golden_file = os.path.join(test_folder, 'golden')
+            stamp_file = os.path.join(test_folder, 'stamp')
+            fuchsia_folder = os.path.join(test_folder, 'fuchsia')
+            test_zbi = os.path.join(test_folder, 'test.zbi')
+            scrutiny = os.environ['SCRUTINY']
+            with open(golden_file, 'w+') as f:
+                f.write('option1')
+
+            # Do not create test_zbi
+
+            os.mkdir(fuchsia_folder)
+
+            args = [
+                '--zbi-file', test_zbi, '--scrutiny', scrutiny, '--fuchsia-dir',
+                fuchsia_folder, '--kernel-cmdline-golden-file', golden_file,
+                '--stamp', stamp_file
+            ]
+            self.assertEqual(-1, verify_zbi_kernel_cmdline.main(args))
+
+
+if __name__ == '__main__':
+    if 'SCRUTINY' not in os.environ or 'ZBI' not in os.environ:
+        print('Please set the SCRUTINY and ZBI environment variables')
+        sys.exit(-1)
+    unittest.main()
diff --git a/build/zbi/zbi.gni b/build/zbi/zbi.gni
index 9a13109..abca11d 100644
--- a/build/zbi/zbi.gni
+++ b/build/zbi/zbi.gni
@@ -79,21 +79,21 @@
     outputs = [ rsp_file ]
   }
 
-  output_file = target_name
+  _output_name = target_name
   if (defined(invoker.output_name)) {
-    output_file = invoker.output_name
+    _output_name = invoker.output_name
   }
   if (defined(invoker.output_extension)) {
     if (invoker.output_extension != "") {
-      output_file += ".${invoker.output_extension}"
+      _output_name += ".${invoker.output_extension}"
     }
   } else {
-    output_file += ".zbi"
+    _output_name += ".zbi"
   }
   if (defined(invoker.output_dir)) {
-    output_file = "${invoker.output_dir}/$output_file"
+    output_file = "${invoker.output_dir}/${_output_name}"
   } else {
-    output_file = "$target_out_dir/$output_file"
+    output_file = "$target_out_dir/${_output_name}"
   }
   json_output_file = "$output_file.json"
 
@@ -109,7 +109,6 @@
                              "assert_no_deps",
                              "compress",
                              "data_deps",
-                             "metadata",
                              "testonly",
                              "visibility",
                            ])
@@ -170,5 +169,44 @@
     } else {
       args += [ "--compressed=$compress" ]
     }
+
+    metadata = {
+      images = []
+      migrated_zbi_input_args = []
+
+      # Another zbi() target that depends on this one will include this ZBI as
+      # input, but not this ZBI's inputs.
+      migrated_zbi_barrier = []
+
+      if (defined(invoker.metadata)) {
+        forward_variables_from(invoker.metadata, "*")
+      }
+
+      # For the //:images build_api_module().
+      images += [
+        {
+          label = get_label_info(":$target_name", "label_with_toolchain")
+          name = _output_name
+          path = rebase_path(output_file, root_build_dir)
+          type = "zbi"
+          cpu = cpu
+          compressed = !defined(invoker.compress) ||
+                       (invoker.compress != false && invoker.compress != "none")
+          if (defined(testonly) && testonly) {
+            testonly = true
+          }
+          forward_variables_from(invoker, [ "tags" ])
+        },
+      ]
+
+      # Provide metadata so that a zbi() target can also act as if it were a
+      # zbi_input() with `type = "zbi"` and $sources of this target's $outputs.
+      # Thus a zbi() target can be a dependency of another zbi() target to
+      # combine them without requiring an intervening zbi_input() target.
+      migrated_zbi_input_args += [
+        "--type=container",
+        rebase_path(output_file, root_build_dir),
+      ]
+    }
   }
 }
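The migrated_zbi_input_args / migrated_zbi_barrier pair follows GN's usual metadata-collection pattern: a consumer walks its deps collecting the data key, and a target whose metadata defines the (empty) barrier key stops the walk from descending into that target's own deps. That is how a dependent zbi() picks up this ZBI as a "--type=container" input without also inheriting its inputs. A rough Python model of such a barrier walk, purely illustrative and not how GN itself is implemented:

    def collect(target, data_key, barrier_key, graph):
        # graph maps a label to {"metadata": {...}, "deps": [labels]}.
        node = graph[target]
        md = node.get("metadata", {})
        collected = list(md.get(data_key, []))
        if barrier_key in md:
            # Barrier present (empty list): keep this target's own entries but
            # do not walk its deps for this key.
            return collected
        for dep in node.get("deps", []):
            collected += collect(dep, data_key, barrier_key, graph)
        return collected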
diff --git a/build/zircon/zbi_tests/BUILD.gn b/build/zircon/zbi_tests/BUILD.gn
index 4b300b8..6acddec 100644
--- a/build/zircon/zbi_tests/BUILD.gn
+++ b/build/zircon/zbi_tests/BUILD.gn
@@ -13,36 +13,5 @@
     "//:zbi_tests",
   ]
   testonly = true
-  zircon_entries = read_file("$zircon_root_build_dir/zbi_tests.json", "json")
-  metadata = {
-    images = []
-    zbi_tests = []
-    foreach(zbi_test, zircon_entries) {
-      if (defined(zbi_test.qemu_kernel_label)) {
-        foreach(image, zircon_images) {
-          assert(defined(image.label), "$image")
-          if (image.label == zbi_test.qemu_kernel_label) {
-            images += [
-              {
-                forward_variables_from(image, "*")
-                path = rebase_path(path, root_build_dir, zircon_root_build_dir)
-                label = string_replace(label, "//", "//zircon/")
-              },
-            ]
-          }
-        }
-      }
-      zbi_tests += [
-        {
-          forward_variables_from(zbi_test, "*")
-          path = rebase_path(path, root_build_dir, zircon_root_build_dir)
-          label = string_replace(label, "//", "//zircon/")
-          if (defined(qemu_kernel_label)) {
-            qemu_kernel_label =
-                string_replace(qemu_kernel_label, "//", "//zircon/")
-          }
-        },
-      ]
-    }
-  }
+  deps = [ "//zircon/kernel/phys:tests" ]
 }
diff --git a/bundles/BUILD.gn b/bundles/BUILD.gn
index 91795b5..999355f 100644
--- a/bundles/BUILD.gn
+++ b/bundles/BUILD.gn
@@ -67,15 +67,6 @@
   deps = [ "//src/connectivity/bluetooth/hci/emulator" ]
 }
 
-# TODO(fxbug.dev/63540): delete this. Use build_infra and test_infra directly.
-group("infratools") {
-  testonly = true
-  public_deps = [
-    ":build_infra",
-    ":test_infra",
-  ]
-}
-
 group("build_infra") {
   testonly = true
   public_deps = [ "//tools:build_infra" ]
@@ -173,6 +164,11 @@
   ]
 }
 
+# Diagnostics tools installed on the boot image.
+group("diagnostics-eng") {
+  deps = [ "//src/diagnostics/bundles:dev-tools" ]
+}
+
 # Group including all drivers in the fuchsia repo. Useful for ensuring
  # all drivers continue to build without needing to build every board.
 group("drivers-build-only") {
diff --git a/bundles/bringup/BUILD.gn b/bundles/bringup/BUILD.gn
index c2cc3ca..e9d486c 100644
--- a/bundles/bringup/BUILD.gn
+++ b/bundles/bringup/BUILD.gn
@@ -9,127 +9,60 @@
   deps = [ "//src/testing/runtests" ]
 
   deps += [
-    "//src/bringup/bin/console:bootfs-test",
     "//src/bringup/bin/console-launcher:shell-permissions-bootfs-test",
     "//src/bringup/bin/console-launcher/qemu-tests:tests",
-    "//src/bringup/bin/ptysvc:ptysvc-test-bootfs-test",
     "//src/bringup/bin/svchost:crashsvc-test-bootfs-test",
     "//src/bringup/bin/sysinfo:sysinfo-bootfs-test",
     "//src/bringup/tests/bootfs_test:running-on-bootfs-test",
     "//src/devices/block/bin/biotime:biotime-test-bootfs-test",
-    "//src/devices/bus/lib/device-protocol-pdev/test:device-protocol-pdev-test-bootfs-test",
     "//src/devices/nand/drivers/broker/test:nand-broker-bootfs-test",  # HW-required.
     "//src/devices/nand/drivers/ram-nand:ram-nand-test-bootfs-test",
     "//src/devices/sysmem/tests/sysmem:sysmem-bootfs-test",
+    "//src/tests/reboot/dm_reboot_bringup_test:tests",
 
     # The devcoordinator-namespace-test assumes the root filesystem and can't be
     # migrated as packages are given their own local filesystem.
     "//src/devices/tests/devcoordinator-namespace:devcoordinator-namespace-bootfs-test",
     "//src/devices/tests/devfs:devfs-bootfs-test",
     "//src/devices/thermal/drivers/mtk-thermal:mtk-thermal-integration-bootfs-test",  # HW-required.
-    "//src/devices/usb/drivers/usb-bus:usb-device-bootfs-test",
     "//src/devices/usb/tests/usb:usb-bootfs-test",  # HW-required.
+    "//src/lib/framebuffer/test:framebuffer-test-bootfs-test",
     "//src/media/audio/drivers/intel-hda/test:intel-hda-bootfs-test",  # HW-required.
     "//src/media/audio/drivers/intel-hda/test:pixelbook-eve-bootfs-test",  # HW-required.
     "//src/zircon/tests/entropy:entropy-bootfs-test",
-    "//src/zircon/tests/events:events-bootfs-test",
     "//src/zircon/tests/exception:exception-bootfs-test",
     "//src/zircon/tests/futex-ownership:futex-ownership-bootfs-test",
-    "//src/zircon/tests/handle-alias:handle-alias-bootfs-test",
     "//src/zircon/tests/job-policy:policy-bootfs-test",
     "//src/zircon/tests/kcounter:kcounter-bootfs-test",
-    "//src/zircon/tests/kernel-clocks:bootfs_tests",
-    "//src/zircon/tests/kernel-cmdline:kernel-cmdline-bootfs-test",
-    "//src/zircon/tests/processor:processor-bootfs-test",
     "//src/zircon/tests/profile:profile-bootfs-test",
-    "//src/zircon/tests/property:property-bootfs-test",
-    "//src/zircon/tests/register-state:register-state-bootfs-test",
-    "//src/zircon/tests/status:status-bootfs-test",
-    "//src/zircon/tests/thread-initial-state:thread-initial-state-bootfs-test",
     "//src/zircon/tests/thread-state:thread-state-bootfs-test",
     "//src/zircon/tests/time:time-bootfs-test",
     "//src/zircon/tests/timers:timers-bootfs-test",
     "//src/zircon/tests/vdso:vdso-base-bootfs-test",
     "//src/zircon/tests/vdso:vdso-bootfs-test",
     "//src/zircon/tests/vdso:vdso-variant-bootfs-test",
-    "//src/zircon/tests/vmo:vmo-bootfs-test",
     "//zircon/system/ulib/c/test:cdebugdata-test-bootfs-test",
     "//zircon/system/ulib/c/test/sanitizer:sanitizer-utils-bootfs-test",
-    "//zircon/system/ulib/chromeos-disk-setup/test:chromeos-disk-setup-bootfs-test",
-    "//zircon/system/ulib/closure-queue/test:closure-queue-bootfs-test",
-    "//zircon/system/ulib/cobalt-client/test:cobalt-client-unit-bootfs-test",
-    "//zircon/system/ulib/debugdata/test:debugdata-bootfs-test",
     "//zircon/system/ulib/disk_inspector/test:disk_inspector_unit-bootfs-test",
-    "//zircon/system/ulib/driver-unit-test/test:logger-unit-bootfs-test",
     "//zircon/system/ulib/elf-search/test:elf-search-bootfs-test",
-    "//zircon/system/ulib/fbl/test:fbl-bootfs-test",
-    "//zircon/system/ulib/fdio-caller/test:fdio-caller-bootfs-test",
-    "//zircon/system/ulib/ffl/test:ffl-bootfs-test",
-    "//zircon/system/ulib/fidl-async/test:fidl-async-test-bootfs-test",
-    "//zircon/system/ulib/fit/test:fit-bootfs-test",
-    "//zircon/system/ulib/fit/test:fit-unittest-bootfs-test",
-    "//zircon/system/ulib/framebuffer/test:framebuffer-test-bootfs-test",
-    "//zircon/system/ulib/fs-pty/test:libfs-pty-test-bootfs-test",
-    "//zircon/system/ulib/fs/metrics/test:metrics-cobalt-bootfs-test",
-    "//zircon/system/ulib/fs/metrics/test:metrics-composite-latency-event-bootfs-test",
-    "//zircon/system/ulib/fs/metrics/test:metrics-inspect-bootfs-test",
-    "//zircon/system/ulib/fs/test:fs-connection-bootfs-test",
-    "//zircon/system/ulib/fs/test:fs-vnode-bootfs-test",
     "//zircon/system/ulib/gpt/test:gpt-unit-bootfs-test",
-    "//zircon/system/ulib/hwreg/test:libhwreg-bootfs-test",
-    "//zircon/system/ulib/hwreg/test:libhwreg-i2c-bootfs-test",
-    "//zircon/system/ulib/id_allocator/test:id_allocator-test-bootfs-test",
-    "//zircon/system/ulib/inet6/test:inet6-test-bootfs-test",
-    "//zircon/system/ulib/intel-hda/test:intel-hda-utils-test-bootfs-test",
-    "//zircon/system/ulib/io-scheduler/test:io-scheduler-test-bootfs-test",
-    "//zircon/system/ulib/ktrace/test:ktrace-test-bootfs-test",
     "//zircon/system/ulib/launchpad/test:launchpad-bootfs-test",
-    "//zircon/system/ulib/lazy_init/test:lazy_init-test-bootfs-test",
-    "//zircon/system/ulib/lockdep/test:lockdep-test-bootfs-test",
-    "//zircon/system/ulib/mipi-dsi/test:mipidsi-bootfs-test",
-    "//zircon/system/ulib/mock-function/test:mock-function-test-bootfs-test",
-    "//zircon/system/ulib/perftest/test:perftest-bootfs-test",
-    "//zircon/system/ulib/pretty/test:pretty-test-bootfs-test",
-    "//zircon/system/ulib/ram-crashlog/test:ram-crashlog-bootfs-test",
-    "//zircon/system/ulib/range/test:range-bootfs-test",
-    "//zircon/system/ulib/refcount/test:refcount-test-bootfs-test",
-    "//zircon/system/ulib/region-alloc/test:region-alloc-bootfs-test",
-    "//zircon/system/ulib/runtests-utils/test:service-proxy-dir-bootfs-test",
-    "//zircon/system/ulib/simplehid/test:simplehid-bootfs-test",
-    "//zircon/system/ulib/smbios/test:smbios-test-bootfs-test",
-    "//zircon/system/ulib/storage-metrics/test:storage-metrics-bootfs-test",
     "//zircon/system/ulib/syslog/test:syslog-bootfs-test",
     "//zircon/system/ulib/syslog/test:syslog-static-bootfs-test",
-    "//zircon/system/ulib/sysmem-make-tracking/test:sysmem-make-tracking-bootfs-test",
-    "//zircon/system/ulib/sysmem-version/test:sysmem-version-bootfs-test",
-    "//zircon/system/ulib/test-exceptions/test:test-exceptions-bootfs-test",
     "//zircon/system/ulib/test-utils/test:test-utils-bootfs-test",
-    "//zircon/system/ulib/tftp/test:tftp-test-bootfs-test",
-    "//zircon/system/ulib/trace-engine/test:hash-table-bootfs-test",
-    "//zircon/system/ulib/trace-provider/test:trace-provider-bootfs-test",
     "//zircon/system/ulib/trace-reader/test:trace-reader-test-bootfs-test",
-    "//zircon/system/ulib/trace-vthread/test:trace-vthread-test-bootfs-test",
-    "//zircon/system/ulib/trace-vthread/test:trace-vthread-with-static-engine-test-bootfs-test",
-    "//zircon/system/ulib/uart/test:uart-unittests-bootfs-test",
-    "//zircon/system/ulib/unittest/test:unittest-bootfs-test",
-    "//zircon/system/ulib/utf_conversion/test:utf_conversion-test-bootfs-test",
     "//zircon/system/ulib/zbi-bootfs/test:zbi-bootfs-test-bootfs-test",
-    "//zircon/system/ulib/zbi/test:zbi-bootfs-test",
-    "//zircon/system/ulib/zircon-crypto/test:zircon-crypto-bootfs-test",
     "//zircon/system/ulib/zx-panic-libc/test:zx-panic-libc-test-bootfs-test",
     "//zircon/system/ulib/zx/test:zx-bootfs-test",
     "//zircon/system/ulib/zxtest/test:zxtest-bootfs-test",
-    "//zircon/system/ulib/zxtest/test/integration:zxtest-integration-bootfs-test",
     "//zircon/system/utest/blobfs-bench:blobfs-bench-bootfs-test",
     "//zircon/system/utest/channel-fatal:channel-fatal-bootfs-test",
-    "//zircon/system/utest/cleanup:cleanup-bootfs-test",
-    "//zircon/system/utest/cmdline:cmdline-bootfs-test",
-    "//zircon/system/utest/compiler:compiler-bootfs-test",
     "//zircon/system/utest/core:core-c11-condvar-bootfs-test",
     "//zircon/system/utest/core:core-c11-mutex-bootfs-test",
     "//zircon/system/utest/core:core-c11-thread-bootfs-test",
     "//zircon/system/utest/core:core-channel-bootfs-test",
     "//zircon/system/utest/core:core-channel-call-etc-bootfs-test",
+    "//zircon/system/utest/core:core-channel-iovec-bootfs-test",
     "//zircon/system/utest/core:core-channel-write-etc-bootfs-test",
     "//zircon/system/utest/core:core-clock-bootfs-test",
     "//zircon/system/utest/core:core-elf-tls-bootfs-test",
@@ -165,40 +98,22 @@
     "//zircon/system/utest/core:core-vmar-bootfs-test",
     "//zircon/system/utest/core:core-vmo-bootfs-test",
     "//zircon/system/utest/core-display:core-display-bootfs-test",
-    "//zircon/system/utest/cprng:cprng-bootfs-test",
-    "//zircon/system/utest/ctor:ctor-bootfs-test",
     "//zircon/system/utest/debugger:debugger-bootfs-test",
     "//zircon/system/utest/device-enumeration:device-enumeration-bootfs-test",
     "//zircon/system/utest/dlfcn:dlfcn-bootfs-test",
-    "//zircon/system/utest/dlopen-indirect-deps:dlopen-indirect-deps-bootfs-test",
-    "//zircon/system/utest/errno:errno-bootfs-test",
-    "//zircon/system/utest/exit:exit-bootfs-test",
-    "//zircon/system/utest/fidl-utils:fidl-utils-bootfs-test",
-    "//zircon/system/utest/fpu:fpu-bootfs-test",
     "//zircon/system/utest/fs-bench:fs-bench-bootfs-test",
-    "//zircon/system/utest/getentropy:getentropy-bootfs-test",
-    "//zircon/system/utest/inspector:inspector-bootfs-test",
-    "//zircon/system/utest/int-types:int-types-bootfs-test",
     "//zircon/system/utest/kernel-unittests:kernel-unittests-bootfs-test",
     "//zircon/system/utest/log:log-bootfs-test",
-    "//zircon/system/utest/logger:logger-bootfs-test",
     "//zircon/system/utest/memfs:memfs-bootfs-test",
     "//zircon/system/utest/miscsvc:miscsvc-bootfs-test",
     "//zircon/system/utest/namespace:namespace-bootfs-test",
-    "//zircon/system/utest/posixio:posixio-bootfs-test",
-    "//zircon/system/utest/psutils:handles-bootfs-test",
     "//zircon/system/utest/psutils:ps-bootfs-test",
     "//zircon/system/utest/pty:pty-bootfs-test",
-    "//zircon/system/utest/qsort:qsort-bootfs-test",
     "//zircon/system/utest/race-tests:race-tests-bootfs-test",
     "//zircon/system/utest/runtests-utils:runtests-utils-bootfs-test",
     "//zircon/system/utest/svchost:svchost-bootfs-test",
-    "//zircon/system/utest/syscall-generation:syscall-generation-bootfs-test",
-    "//zircon/system/utest/sysconf:sysconf-bootfs-test",
     "//zircon/system/utest/task-utils:task-utils-bootfs-test",
-    "//zircon/system/utest/thread-safe-deleter:thread-safe-deleter-bootfs-test",
     "//zircon/system/utest/trace:trace-bootfs-test",
-    "//zircon/system/utest/trace:trace-static-engine-bootfs-test",
     "//zircon/system/utest/utc-procargs:utc-procargs-bootfs-test",
     "//zircon/third_party/scudo:tests",
     "//zircon/third_party/ulib/linenoise/test:linenoise-test-bootfs-test",
@@ -207,9 +122,7 @@
   if (target_cpu == "x64") {
     deps += [
       "//src/devices/bin/acpidump:acpidump-test-bootfs-test",
-      "//src/devices/board/drivers/x86:x86-battery-bootfs-test",
       "//src/devices/board/drivers/x86:x86-board-bootfs-test",
-      "//src/devices/board/drivers/x86:x86-cros-ec-bootfs-test",
       "//src/devices/board/drivers/x86:x86-lid-bootfs-test",
       "//src/devices/board/drivers/x86:x86-pwrsrc-bootfs-test",
       "//src/devices/board/drivers/x86:x86-thermal-bootfs-test",
diff --git a/bundles/buildbot/BUILD.gn b/bundles/buildbot/BUILD.gn
index b8d4085..ceb562b 100644
--- a/bundles/buildbot/BUILD.gn
+++ b/bundles/buildbot/BUILD.gn
@@ -33,9 +33,11 @@
     "//src/media/bundles:services",
     "//src/media/bundles:tools",
 
+    # TODO(fxbug.dev/66582) re-enable perf:touch_input_test.
     # Placed with core because core does not run scenic, in contrast to terminal bundle.
     # (perf:touch_input_test expects scenic _not_ to be running)
-    "//src/tests/end_to_end/perf:touch_input_test",
+    # "//src/tests/end_to_end/perf:touch_input_test",
+
     "//src/tests/end_to_end/screen_is_not_black:config",
     "//tools",
     "//tools:tests",
diff --git a/bundles/fidl/BUILD.gn b/bundles/fidl/BUILD.gn
index 6390e19..6fd1810 100644
--- a/bundles/fidl/BUILD.gn
+++ b/bundles/fidl/BUILD.gn
@@ -18,6 +18,7 @@
     "//src/tests/fidl/dangerous_identifiers:tests",
     "//src/tests/fidl/dart_bindings_test",
     "//src/tests/fidl/fidl_go_conformance:tests",
+    "//src/tests/fidl/fuzzer:tests",
     "//src/tests/fidl/source_compatibility:tests",
     "//third_party/go:fidl-tests",
     "//tools/fidl:tests",
diff --git a/docs/README.md b/docs/README.md
index 397938e..0cfb8cece 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -8,7 +8,7 @@
  - [Getting started](/docs/get-started/README.md) - everything you need to get started with Fuchsia
  - [Development](development/README.md) - instructions for building, running and
    testing Fuchsia and software that runs on Fuchsia
- - [System](/docs/concepts/README.md) - documentation for how Fuchsia works
+ - [System](/docs/concepts/index.md) - documentation for how Fuchsia works
  - [Zircon](/docs/concepts/kernel/README.md) - documentation for the Zircon kernel
  - [Run an example component](/docs/development/run/run-examples.md) - instructions for running examples
    on a device
diff --git a/docs/_toc.yaml b/docs/_toc.yaml
index da48047..9a8809a 100644
--- a/docs/_toc.yaml
+++ b/docs/_toc.yaml
@@ -28,3 +28,6 @@
 - name: "Governance"
   contents:
   - include: /docs/contribute/governance/_toc.yaml
+- name: "Community"
+  contents:
+  - include: /docs/contribute/community/_toc.yaml
\ No newline at end of file
diff --git a/docs/concepts/README.md b/docs/concepts/README.md
deleted file mode 100644
index c0a08a2..0000000
--- a/docs/concepts/README.md
+++ /dev/null
@@ -1,94 +0,0 @@
-# Overview
-
-Fuchsia is an open-source operating system designed from the ground up for
-security and updatability.
-
-## Fuchsia is…
-
-### Fuchsia is designed for security and privacy
-
-Security and privacy are woven deeply into the architecture of Fuchsia.  The
-basic building blocks of Fuchsia, the kernel primitives, are exposed to
-applications as object-capabilities, which means that applications running on
-Fuchsia have no ambient authority: applications can interact only with the
-objects to which they have been granted access explicitly.
-
-Software is delivered in hermetic packages and everything is sandboxed, which
-means all software that runs on the system, including applications and system
-components, receives the least privilege it needs to perform its job and gains
-access only to the information it needs to know.
-
-### Fuchsia is designed to be updatable 
-
-Fuchsia works by combining components delivered in packages.  Fuchsia packages
-are designed to be updated independently or even delivered ephemerally, which
-means packages are designed to come and go from the device as needed and the
-software is always up-to-date, like a Web page.
-
-Fuchsia aims to provide drivers with a binary-stable interface. In the future,
-drivers compiled for one version of Fuchsia will continue to work in future
-versions of Fuchsia without needing to be modified or even recompiled.  This
-approach means that Fuchsia devices will be able to update to newer versions of
-Fuchsia seamlessly while keeping their existing drivers.
-
-### Fuchsia is designed to be language and runtime agnostic
-
-Fuchsia currently supports a variety of languages and runtimes, including C++,
-Rust, Flutter, and Web.  Fuchsia is designed to let developers bring their own
-runtime, which means a developer can use a variety of languages or runtimes
-without needing to change Fuchsia itself.
-
-Applications interact with each other and the system using message passing,
-which means any application that can format messages appropriately can
-participate fully in the system regardless of its language or runtime.  Fuchsia
-is defined by these protocols, much like the Internet is defined by its
-protocols rather than a particular client or server implementation.
-
-### Fuchsia is designed for performance
-
-Fuchsia makes heavy use of asynchronous communication, which reduces latency by
-letting the sender proceed without waiting for the receiver.  Fuchsia optimizes
-memory use by avoiding garbage collection in the core operating system, which
-helps to minimize memory requirements to achieve equivalent performance.
-
-### Fuchsia is open source
-
-Fuchsia is built in the open using BSD/MIT-style open source licenses.
-Fuchsia has an inclusive community that welcomes high-quality, well-tested
-contributions from everyone.
-
-## Fuchsia is not…
-
-### Fuchsia is not based on Linux
-
-Fuchsia does not use the Linux kernel.  Instead, Fuchsia has its own kernel,
-Zircon, which evolved from LittleKernel.  Fuchsia implements some, but not all,
-of the POSIX specification as a library on top of the underlying kernel
-primitives, which focus on secure message passing and memory management.  Many
-core system services, such as file systems and networking, run outside the
-kernel in least-privilege, need-to-know sandboxes.
-
-### Fuchsia is not a microkernel
-
-Although Fuchsia applies many of the concepts popularized by microkernels,
-Fuchsia does not strive for minimality.  For example, Fuchsia has over 170
-syscalls, which is vastly more than a typical microkernel.  Instead of
-minimality, the system architecture is guided by practical concerns about
-security, privacy, and performance.  As a result, Fuchsia has a pragmatic,
-message-passing kernel.
-
-### Fuchsia is not a user experience
-
-Fuchsia is not tied to a specific end-user experience.  Instead, Fuchsia is
-general purpose and contains the building blocks necessary for creating a wide
-variety of high-quality user experiences.
-
-Fuchsia does have a developer experience, which lets developers write software
-for Fuchsia via SDKs and tools.
-
-### Fuchsia is not a science experiment
-
-Fuchsia's goal is to power production devices and products used for
-business-critical applications.  As such, Fuchsia is not a playground for
-experimental operating system concepts.  Instead, the platform roadmap is driven
-by practical use cases arising from partner and product needs.
\ No newline at end of file
diff --git a/docs/concepts/_toc.yaml b/docs/concepts/_toc.yaml
index ba2c1d2..7c5d615 100644
--- a/docs/concepts/_toc.yaml
+++ b/docs/concepts/_toc.yaml
@@ -2,16 +2,19 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-# Please, read https://fuchsia.dev/fuchsia-src/community/contribute/docs/documentation_navigation_toc
+# Please, read https://fuchsia.dev/fuchsia-src/contribute/docs/documentation-navigation-toc
 # before making changes to this file, and add a member of the fuchsia.dev
 # team as reviewer.
 toc:
 - title: "Overview"
-  path: /docs/concepts
+  path: /docs/concepts/index.md
 - title: "API"
   section:
   - include: /docs/concepts/api/_toc.yaml
-- title: "Architecture"
+- title: "Architecture principles"
+  section:
+  - include: /docs/concepts/principles/_toc.yaml
+- title: "Architecture support"
   section:
   - include: /docs/concepts/architecture/_toc.yaml
 - title: "Bluetooth"
@@ -76,6 +79,9 @@
 - title: "Testing"
   section:
   - include: /docs/concepts/testing/_toc.yaml
+- title: "Time"
+  section:
+  - include: /docs/concepts/time/_toc.yaml
 - title: "Tracing"
   section:
   - include: /docs/concepts/tracing/_toc.yaml
diff --git a/docs/concepts/api/cli.md b/docs/concepts/api/cli.md
index 207e094..1e46c51 100644
--- a/docs/concepts/api/cli.md
+++ b/docs/concepts/api/cli.md
@@ -176,11 +176,10 @@
 
 ### Style Guides
 
-Follow the corresponding [style guide](../README.md#languages) for the language
-and area of Fuchsia being developed. For example,
-if the tool is included with Zircon and written
-in C++, use the style guide for C++ in Zircon. Specifically, avoid creating a
-separate style guide for tools.
+Follow the corresponding [style guide](/docs/development/languages/README.md)
+for the language and area of Fuchsia being developed. For example, if the tool
+is included with Zircon and written in C++, use the style guide for C++ in
+Zircon. Specifically, avoid creating a separate style guide for tools.
 
 ### Runtime Link Dependencies
 
@@ -305,9 +304,13 @@
 
 Options must start with either one ('`-`') or two ('`--`') dashes followed by an
 alphanumeric label. In the case of a single dash, the length of the label must
-be 1. If the length of the label is two or more then two dashes must be used.
+be 1. If the length of the label is two or more, then two dashes must be used.
 For example: `-v` or `--help` are correct; `-help` is not valid.
 
+For option names with more than one word (for example, "foo bar"),
+you must use a single dash ('`-`') between words. For example, "foo bar"
+becomes `--foo-bar`.
+
 All choices are required to have a (`--`) option. Providing single character
 shorthand (`-`) is optional. E.g. it's okay to provide just `--output`, or both
 `-o` and `--output`, but it's not ok to only provide an `-o` option without a
@@ -441,7 +444,7 @@
 
 A tool must accept a `--help` switch and provide usage information to the
 command line in that case. The layout and syntax of the help text is described
-in a future document.
+in [CLI tool help requirements](/docs/concepts/api/cli_help.md).
 
 The tool must not do other work (i.e. have side effects) when displaying help.
 
diff --git a/docs/concepts/api/cli_help.md b/docs/concepts/api/cli_help.md
index e7785ee..85b87c9 100644
--- a/docs/concepts/api/cli_help.md
+++ b/docs/concepts/api/cli_help.md
@@ -12,7 +12,7 @@
 
 Help documentation must include a usage section written in
 [Usage](#usage) format, followed by a brief descriptions about
-the command, and the following sections (as needed): 
+the command, and the following sections (as needed):
 
 * Options
 * Commands
diff --git a/docs/concepts/api/dart.md b/docs/concepts/api/dart.md
index 2af9a50..84093c2 100644
--- a/docs/concepts/api/dart.md
+++ b/docs/concepts/api/dart.md
@@ -2,43 +2,114 @@
 
 [TOC]
 
-
 ## Overview
-This document describes heuristics and rules for writing Dart libraries that are published in the Fuchsia SDK.
 
-Unless otherwise specified, Fuchsia library authors should adhere to all the heuristics and rules recommended by the Dart team itself under [Effective Dart](https://www.dartlang.org/guides/language/effective-dart). Author’s should familiarize themselves with all sections, [Style](https://www.dartlang.org/guides/language/effective-dart/style), [Documentation](https://www.dartlang.org/guides/language/effective-dart/documentation), [Usage](https://www.dartlang.org/guides/language/effective-dart/usage) and [Design](https://www.dartlang.org/guides/language/effective-dart/design) prior to reading this rubric.
+This document describes heuristics and rules for writing Dart libraries that are
+published in the Fuchsia SDK.
+
+Unless otherwise specified, Fuchsia library authors should adhere to all the
+heuristics and rules recommended by the Dart team itself under [Effective
+Dart](https://www.dartlang.org/guides/language/effective-dart). Authors should
+familiarize themselves with all sections,
+[Style](https://www.dartlang.org/guides/language/effective-dart/style),
+[Documentation](https://www.dartlang.org/guides/language/effective-dart/documentation),
+[Usage](https://www.dartlang.org/guides/language/effective-dart/usage) and
+[Design](https://www.dartlang.org/guides/language/effective-dart/design) prior
+to reading this rubric.
 
 ### Terminology
-There are some terms of art that Dart uses which conflict with Fuchsia’s terminology. 
 
-- [Fuchsia package](/src/sys/pkg/bin/pm/README.md#structure-of-a-fuchsia-package): A Fuchsia package is one or more collections of files that provide one or more programs, components or services for a Fuchsia system.
-- Fuchsia library: An informal definition for implementation code used by Fuchsia, usually found in lib or lib/src directories. Libraries are a convention, most policies for libraries are enforced socially or fallback to language specific approaches and tooling.
-- [Dart package](https://dart.dev/guides/packages): The Dart package system is used to share software like libraries and tools within the Dart ecosystem, e.g. via Pub. Often a package is a collection of files with a minimum of a pubspec.yaml file and at least one Dart file, in-tree Dart packages will also have a `BUILD.gn` file.
-- [Dart library](https://dart.dev/tools/pub/package-layout#public-libraries): A collection of Dart code (classes, constants, typedefs, etc.) isolated to a single namespace and corresponding to a single entry point, e.g. `import 'package:enchilada/enchilada.dart';` imports the enchilada library. Note that Dart libraries have a privacy boundary, e.g. private implementation details are not visible or accessible outside of the library. A Dart package can contain multiple Dart Libraries.
+There are some terms of art that Dart uses which conflict with Fuchsia’s
+terminology.
 
-When writing Dart code it is important to understand the distinction in terminology in order to remain clear when communicating with team members whose primary language might be one of the other supported languages (C++, Rust, etc.). 
+- [Fuchsia
+  package](/src/sys/pkg/bin/pm/README.md#structure-of-a-fuchsia-package): A
+  Fuchsia package is one or more collections of files that provide one or more
+  programs, components or services for a Fuchsia system.
+- Fuchsia library: An informal definition for implementation code used by
+  Fuchsia, usually found in lib or lib/src directories. Libraries are a
+  convention; most policies for libraries are enforced socially or fall back to
+  language-specific approaches and tooling.
+- [Dart package](https://dart.dev/guides/packages): The Dart package system is
+  used to share software like libraries and tools within the Dart ecosystem,
+  e.g. via Pub. Often a package is a collection of files with a minimum of a
+  pubspec.yaml file and at least one Dart file; in-tree Dart packages will also
+  have a `BUILD.gn` file.
+- [Dart library](https://dart.dev/tools/pub/package-layout#public-libraries): A
+  collection of Dart code (classes, constants, typedefs, etc.) isolated to a
+  single namespace and corresponding to a single entry point, e.g. `import
+  'package:enchilada/enchilada.dart';` imports the enchilada library. Note that
+  Dart libraries have a privacy boundary, e.g. private implementation details
+  are not visible or accessible outside of the library. A Dart package can
+  contain multiple Dart libraries.
+
+When writing Dart code it is important to understand the distinction in
+terminology in order to remain clear when communicating with team members whose
+primary language might be one of the other supported languages (C++, Rust,
+etc.).
 
 - A Fuchsia package can contain components implemented as Dart binaries.
-- A Dart binary is defined within a Dart package, and often has dependencies on other Dart packages.
-- Code shared as a library in Fuchsia’s tree written in Dart is implemented as a Dart package.
+- A Dart binary is defined within a Dart package, and often has dependencies on
+  other Dart packages.
+- Code shared as a library in Fuchsia’s tree written in Dart is implemented as a
+  Dart package.
 
 ### Focus on the Interfaces
-Public classes should expose a clean user interface which clearly describes the API surface and is free from internal implementation details. Classes which contain more than a minimal amount of functionality should expose their API in an abstract class with the implementation inside a private implementation file. Doing so allows for the users of the classes to focus on the public methods and forces the implementer to think about the usage of the class before implementation.
+
+Public classes should expose a clean user interface which clearly describes the
+API surface and is free from internal implementation details. Classes which
+contain more than a minimal amount of functionality should expose their API in
+an abstract class with the implementation inside a private implementation file.
+Doing so allows for the users of the classes to focus on the public methods and
+forces the implementer to think about the usage of the class before
+implementation.
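+
+For illustration, a minimal sketch of this pattern (all names here are
+hypothetical) might look like the following, with the implementation living in
+a private, non-exported file in practice:
+
+```
+// volume_control.dart - the public surface users import.
+abstract class VolumeControl {
+  factory VolumeControl() => VolumeControlImpl();
+
+  /// The current volume, in the range [0.0, 1.0].
+  double get level;
+
+  void setLevel(double level);
+}
+
+// In practice this would live under lib/src/ and not be exported.
+class VolumeControlImpl implements VolumeControl {
+  double _level = 0.0;
+
+  @override
+  double get level => _level;
+
+  @override
+  void setLevel(double level) {
+    assert(level >= 0.0 && level <= 1.0);
+    _level = level;
+  }
+}
+```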
 
 ### Consider Composability
-When designing the API consider how it will fit into the larger Dart ecosystem of libraries. For example, if writing an API which delivers events consider using Streams instead of callbacks because they compose better with libraries like Flutter.
+
+When designing the API, consider how it will fit into the larger Dart ecosystem
+of libraries. For example, if writing an API which delivers events, consider
+using Streams instead of callbacks because they compose better with libraries
+like Flutter.
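+
+As a rough sketch (the API here is hypothetical), an event source exposed as a
+`Stream` composes more naturally than a callback setter:
+
+```
+abstract class BatteryMonitor {
+  // GOOD: callers can map, filter, and listen, or feed this directly into
+  // composable APIs such as Flutter's StreamBuilder.
+  Stream<double> get onLevelChanged;
+
+  // AVOID: a one-off callback setter is harder to compose.
+  // void setLevelChangedCallback(void Function(double level) callback);
+}
+```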
 
 ## Lint Rules
-Dart code written against the Fuchsia SDK should pass all the lint rules specified by the [analysis_options.yaml](https://fuchsia.googlesource.com/topaz/+/HEAD/tools/analysis_options.yaml) file which lives in the topaz repository. These lint rules will help to automate the review API review process. There are situations where a lint rule may be in conflict with a specific API and may need to be explicitly ignored. If a file is opting out of a lint rule the developer must provide a comment explaining the reasoning for opting out of the lint rule.
+
+Dart code written against the Fuchsia SDK should pass all the lint rules
+specified by the
+[analysis_options.yaml](https://fuchsia.googlesource.com/topaz/+/HEAD/tools/analysis_options.yaml)
+file which lives in the topaz repository. These lint rules will help to automate
+the API review process. There are situations where a lint rule may be in
+conflict with a specific API and may need to be explicitly ignored. If a file is
+opting out of a lint rule the developer must provide a comment explaining the
+reasoning for opting out of the lint rule.
 
 ## Library Structure
-When organizing the structure of a Dart package it is important to follow the recommendations laid out by the [Effective Dart](https://www.dartlang.org/guides/language/effective-dart) style guide. Additionally, developers should consider how their code is exported. For a more complicated package, developers should avoid a singular catch all top-level export file and rather expose a top level file per logical grouping of classes that make sense to be pulled under one import line. This allows users of the library the ability to have finer grained control over which sections of the library they import. An example is a package which contains functionality for both agents and modules. In this scenario, we could have one import for agents and one for modules but they could be in the same package. 
+
+When organizing the structure of a Dart package it is important to follow the
+recommendations laid out by the [Effective
+Dart](https://www.dartlang.org/guides/language/effective-dart) style guide.
+Additionally, developers should consider how their code is exported. For a more
+complicated package, developers should avoid a single catch-all top-level
+export file and rather expose a top-level file per logical grouping of classes
+that make sense to be pulled under one import line. This gives users of the
+library finer-grained control over which sections of the
+library they import. An example is a package which contains functionality for
+both agents and modules. In this scenario, we could have one import for agents
+and one for modules but they could be in the same package.
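+
+For example, a sketch of such a layout (file names are hypothetical, reusing
+the `enchilada` package from above) could be:
+
+```
+// lib/agents.dart - import 'package:enchilada/agents.dart';
+export 'src/agents/agent_host.dart';
+export 'src/agents/agent_task.dart';
+
+// lib/modules.dart - import 'package:enchilada/modules.dart';
+export 'src/modules/module_host.dart';
+```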
 
 ## Comments/Documentation
-All comments should adhere to [Effective Dart: Documentation](https://www.dartlang.org/guides/language/effective-dart/documentation) as well as the [Fuchsia Documentation](documentation.md) guide.
+
+All comments should adhere to [Effective Dart:
+Documentation](https://www.dartlang.org/guides/language/effective-dart/documentation)
+as well as the [Fuchsia Documentation](documentation.md) guide.
 
 ## Dependencies
-Packages written for the Dart Fuchsia SDK should not take on third party dependencies that are not themselves also in the Fuchsia SDK. Exceptions will be made for the following, well established, dependencies which are likely to be present in all environments. Any packages which should be added to this list must be approved by the [API Council](/docs/contribute/governance/api_council.md).
+
+Packages written for the Dart Fuchsia SDK should not take on third party
+dependencies that are not themselves also in the Fuchsia SDK. Exceptions will be
+made for the following well-established dependencies, which are likely to be
+present in all environments. Any packages which should be added to this list
+must be approved by the [API
+Council](/docs/contribute/governance/api_council.md).
 
 - [logger](https://pub.dev/packages/logging)
 - [meta](https://pub.dev/packages/meta)
@@ -46,29 +117,47 @@
 - [flutter](https://flutter.dev/)
 
 
-Packages which do take on external dependencies should consider whether they want to reexport those symbols. If the dependency is reexported then the generated documentation will generate documentation for the external dependency. However, reexporting the dependency will create a tight coupling between package versions.
+Packages which do take on external dependencies should consider whether they
+want to reexport those symbols. If the dependency is reexported, then the
+generated documentation will include documentation for the external dependency.
+However, reexporting the dependency will create a tight coupling between package
+versions.
 
 ## Formatting
-Code should be formatted using the `dartfmt` tool. This is an opinionated tool that cannot be configured. Formatting all of our code with this tool will ensure consistency. In Fuchsia, you can use `fx format-code` will run `dartfmt` on all staged dart files. 
+
+Code should be formatted using the `dartfmt` tool. This is an opinionated tool
+that cannot be configured. Formatting all of our code with this tool will ensure
+consistency. In Fuchsia, you can run `fx format-code`, which will run `dartfmt`
+on all staged Dart files.
 
 ## Files
 
 - DO name files after their public class name
-- PREFER placing each class into their own files, even if they’re private. It should be rare for multiple classes to live in the same file. Only private, small, simple and standalone classes can share a file with a public class.
-- AVOID creating utility classes or libraries, these tend to turn into code dumping grounds. Instead, use precise naming that clearly communicates the purpose of the code being created.
+- PREFER placing each class into its own file, even if it’s private. It
+  should be rare for multiple classes to live in the same file. Only private,
+  small, simple and standalone classes can share a file with a public class.
+- AVOID creating utility classes or libraries; these tend to turn into code
+  dumping grounds. Instead, use precise naming that clearly communicates the
+  purpose of the code being created.
 - DON’T use the `part of` directive to avoid tight coupling of classes.
 
 ### Methods
 
-- PREFER using named parameters vs positional parameters for public methods on public classes that have greater than 2 parameters. This aids code refactor and allowed adding extra parameters without breaking the public API contract. 
-- AVOID using functions which can do more than one thing like `void updateAndCommit();` but prefer explicit naming 
+- PREFER using named parameters vs positional parameters for public methods on
+  public classes that have greater than 2 parameters. This aids code
+  refactoring and allows adding extra parameters without breaking the public
+  API contract (see the sketch after this list).
+- AVOID functions which do more than one thing, like `void updateAndCommit()`;
+  prefer explicit naming.
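+
+A small sketch of the named-parameter guidance (the function and its parameters
+are hypothetical):
+
+```
+import 'package:meta/meta.dart';
+
+// Named parameters keep call sites readable and let new optional parameters be
+// added later without breaking existing callers.
+void resizeImage(
+  String path, {
+  @required int width,
+  @required int height,
+  bool preserveAspectRatio = true,
+}) {
+  assert(width != null && height != null);
+  // Implementation elided; the signature is the point of this sketch.
+}
+
+// Call site stays self-describing:
+// resizeImage('photo.png', width: 640, height: 480);
+```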
 
 ### Constructors
 
+- PREFER using named parameters with constructors that have more than two
+- PREFER using named parameters with Constructors that have more than two
+  parameters.
 - DO use the meta package to indicate which parameters are required.
 - DO assert on required parameters.
-- DO throw exceptions/errors for public API which will have detrimental side effects if invalid input is passed to constructors since asserts do not run in release builds.
+- DO throw exceptions/errors for public API which will have detrimental side
+  effects if invalid input is passed to constructors since asserts do not run in
+  release builds.
 
 ```
 /// Constructs a [Car] object
@@ -84,7 +173,8 @@
 ```
 
 ## Naming
-If a method will use a cached object, or create it if it doesn’t exist, avoid introducing or into the name.
+If a method will use a cached object, or create it if it doesn’t exist, avoid
+introducing “or” into the name.
 
 ```
 class Node {
@@ -96,7 +186,10 @@
 }
 ```
 
-When adding a function or interface which will have methods invoked in response to another action, name the methods add<NAME>Listener() and remove<NAME>Listener(). The objects which implement the <NAME>Listener interface should name the invoked methods on<EVENT>.
+When adding a function or interface which will have methods invoked in response
+to another action, name the methods `add<NAME>Listener()` and
+`remove<NAME>Listener()`. The objects which implement the `<NAME>Listener`
+interface should name the invoked methods `on<EVENT>`.
 
 ```
 class MediaController {
@@ -110,9 +203,14 @@
 }
 ```
 
-When appending an item to your object prefer the name add<Name> instead of append to follow the dart list naming.
+When appending an item to your object, prefer the name `add<Name>` instead of
+`append`, following Dart `List` naming.
 
-When deciding between using a single member abstract or a plain `Function` as a `Listener` object consider how your API might evolve over time. If you expect that you may add more methods to the listener use a single member abstract to allow for the evolution but if the API is not likely to change use a plain function.
+When deciding between using a single-member abstract class or a plain
+`Function` as a `Listener` object, consider how your API might evolve over
+time. If you expect that you may add more methods to the listener, use a
+single-member abstract class to allow for that evolution; if the API is not
+likely to change, use a plain function.
 
 ```
 // This could logically grow to include an onDoubleTap()
@@ -128,12 +226,18 @@
 ```
 
 ## Preferred Types
-Concrete data types should be used instead of lower level primitives. The following types should be used when possible:
+Concrete data types should be used instead of lower level primitives. The
+following types should be used when possible:
 
-- [Duration](https://api.dartlang.org/stable/2.4.0/dart-core/Duration-class.html) when working with a span of time.
-- [DateTime](https://api.dartlang.org/stable/2.4.0/dart-core/DateTime-class.html) when working with dates.
+- [Duration](https://api.dartlang.org/stable/2.4.0/dart-core/Duration-class.html)
+  when working with a span of time.
+- [DateTime](https://api.dartlang.org/stable/2.4.0/dart-core/DateTime-class.html)
+  when working with dates.
 
-If there is not a concrete type which can be used to represent your object at a higher level your API should expose one. For example, if we had an API which dealt with currency we would create a `Currency` data type instead of working with `num` types. 
+If there is not a concrete type which can be used to represent your object at a
+higher level, your API should expose one. For example, if we had an API which
+dealt with currency we would create a `Currency` data type instead of working
+with `num` types.
 
 ```
 // BAD
@@ -143,7 +247,8 @@
 Currency getCash() { ... }
 ```
 
-Your API should avoid returning unstructured JSON data but rather transform any JSON into a typed value.
+Your API should avoid returning unstructured JSON data but rather transform any
+JSON into a typed value.
 
 ```
 // BAD
@@ -157,25 +262,51 @@
 ```
 
 ## Internationalization
-If a package exposes a user visible string the string should be internationalized. In the absence of an ability to internationalize a user visible string the API should return data in which a user of a library can construct an internationalized string.
+If a package exposes a user-visible string, the string should be
+internationalized. In the absence of an ability to internationalize a
+user-visible string, the API should return data from which a user of the
+library can construct an internationalized string.
 
-Exceptions and log messages do not need to be internationalized if they are not intended to be user visible.
+Exceptions and log messages do not need to be internationalized if they are not
+intended to be user visible.
 
 ## Error Handling
-All error handling should adhere to [Effective Dart: Error handling](https://www.dartlang.org/guides/language/effective-dart/usage#error-handling). 
+All error handling should adhere to [Effective Dart: Error
+handling](https://www.dartlang.org/guides/language/effective-dart/usage#error-handling).
+
 
 ## Error vs. Exception
-Error and its subclasses are for programmatic errors that shouldn’t be explicitly caught. An Error indicates a bug in your code, it should unwind the entire call stack, halt the program, and print a stack trace so you can locate and fix the bug.
+Error and its subclasses are for programmatic errors that shouldn’t be
+explicitly caught. An Error indicates a bug in your code; it should unwind the
+entire call stack, halt the program, and print a stack trace so you can locate
+and fix the bug.
 
-Non-Error exception classes are for runtime errors. If your API implementation throws an exception, it should be documented as part of the public API and it’s expected behavior. This will facilitate programmatic handling of the exception by API clients.
+Non-Error exception classes are for runtime errors. If your API implementation
+throws an exception, it should be documented as part of the public API and its
+expected behavior. This will facilitate programmatic handling of the exception
+by API clients.
 
-Except in a few special circumstances, idiomatic Dart should throw Errors, but never catch them. They exist specifically to not be caught so that they take down the app and alert the programmer to the location of the bug.
+Except in a few special circumstances, idiomatic Dart should throw Errors, but
+never catch them. They exist specifically to not be caught so that they take
+down the app and alert the programmer to the location of the bug.
 
-Note: often times people refer to Error when they mean Exception and vice versa. Especially developers that are coming from a different language. Apply your knowledge of their difference when developing your Dart API.
+Note: oftentimes people refer to Error when they mean Exception and vice versa,
+especially developers coming from a different language. Apply your
+knowledge of their difference when developing your Dart API.
 
-Your public API should throw well defined and typed exceptions so that users can catch them and react appropriately. If you are not in control of all the code that is being called by your package, maybe because you are using a third party library, you may not be able to know exactly which exceptions may be thrown. If this is the case, you can either attempt to catch the exception and wrap it in a type that you create or clearly document that an exception of unknown type may be thrown.
+Your public API should throw well-defined and typed exceptions so that users can
+catch them and react appropriately. If you are not in control of all the code
+that is being called by your package, maybe because you are using a third-party
+library, you may not be able to know exactly which exceptions may be thrown. If
+this is the case, you can either attempt to catch the exception and wrap it in a
+type that you create or clearly document that an exception of unknown type may
+be thrown.
 
-If your API can fail in more than one way the exception should clearly indicate the failure method. Consider throwing different types of exceptions or adding a code to the exception so the caller can respond appropriately. Also, don’t forget to publicly document all the exceptions that are potentially thrown by a given method.
+If your API can fail in more than one way, the exception should clearly indicate
+the failure mode. Consider throwing different types of exceptions or adding a
+code to the exception so the caller can respond appropriately. Also, don’t
+forget to publicly document all the exceptions that are potentially thrown by a
+given method.
 
 ```
 enum ErrorCode { foo, bar }
@@ -197,11 +328,20 @@
 ```
 
 ### Assertions vs. Exceptions
-Assertions should only be used to verify conditions that should be logically impossible to be false due to programmer error, not user or data input. These conditions should only be based on inputs generated by your own code. Any checks based on external inputs should use exceptions.
+Assertions should only be used to verify conditions that should be logically
+impossible to be false due to programmer error, not user or data input. These
+conditions should only be based on inputs generated by your own code. Any checks
+based on external inputs should use exceptions.
 
-Use asserts when you are in full control of the inputs. For example verify private functions' arguments with asserts, and using exceptions for public functions arguments.
+Use asserts when you are in full control of the inputs. For example, verify
+private functions' arguments with asserts, and use exceptions for public
+functions' arguments.
 
-In Dart all assertions are compiled out from the production/release builds. Therefore, your program must work just as well when all assertions are removed. Do not directly assert on a value returned directly from a function as this can cause the code to not be included in release build since the entire body of the assert is removed in release builds.
+In Dart, all assertions are compiled out of production/release builds.
+Therefore, your program must work just as well when all assertions are removed.
+Do not assert directly on a value returned from a function, as this can cause
+that code not to run in release builds since the entire body of the assert is
+removed.
 
 ```
 // BAD
@@ -213,7 +353,12 @@
 ```
 
 ### FIDL Exception Handling
-In Fuchsia, the generated Dart FIDL bindings are always asynchronous, thus all methods return a `Future` even if there is no return value (`Future<void>` is used). Also, when connecting to a particular service the connection is assumed to be successful even though it can fail to connect or disconnect in the future. For these reasons, the caller of any FIDL api should always assume that a specific call can fail and handle that appropriately when needed. 
+In Fuchsia, the generated Dart FIDL bindings are always asynchronous, thus all
+methods return a `Future` even if there is no return value (`Future<void>` is
+used). Also, when connecting to a particular service the connection is assumed
+to be successful even though it can fail to connect or disconnect in the future.
+For these reasons, the caller of any FIDL API should always assume that a
+specific call can fail and handle that appropriately when needed.
 
 ```
 final _proxy = fidl_myService.MyServiceProxy();
@@ -227,12 +372,16 @@
 ```
 
 ## Testing
-Please review [Dart](https://www.dartlang.org/guides/testing) and [Flutter](https://flutter.dev/docs/testing) testing guides.
+Please review [Dart](https://www.dartlang.org/guides/testing) and
+[Flutter](https://flutter.dev/docs/testing) testing guides.
 
-- DO test for `Future<T>` when disambiguating a `FutureOr<T>` whose type argument could be Object.
+- DO test for `Future<T>` when disambiguating a `FutureOr<T>` whose type
+  argument could be Object.
 - DON’T use `@visibleForTesting` on public API.
 
-The API surface of your package should be well tested. However, the public API should not need to leak internal details for the class to be testable. Consider the following example:
+The API surface of your package should be well tested. However, the public API
+should not need to leak internal details for the class to be testable. Consider
+the following example:
 
 ```
 class Foo {
@@ -246,8 +395,14 @@
 }
 ```
 
-Rather, consider writing your class as an abstract class so the user does not need to know about the injection of global services but tests can directly inject the global services into the implementation. These avoids leaking implementation details to the user and provides an API that the user cannot abuse or mess up. This has the added advantage of allowing the API to evolve if the GlobalServices class evolves without having to change the callers of the method.
- 
+Rather, consider writing your class as an abstract class so the user does not
+need to know about the injection of global services but tests can directly
+inject the global services into the implementation. This avoids leaking
+implementation details to the user and provides an API that the user cannot
+abuse or mess up. This has the added advantage of allowing the API to evolve if
+the GlobalServices class evolves without having to change the callers of the
+method.
+
 ```
 // foo.dart
 abstract class Foo {
@@ -266,7 +421,13 @@
 }
 ```
 
-Dart does not allow a private class/function to be accessed from within a test. This has the effect that any private classes cannot be tested. This may be ok if there is a corresponding public class/function that can exercise the private members but this may not always be the case. In these situations it is best to move the private class into its own file which does not get exported by the top-level export and make it public. The tests can now access your private members.
+Dart does not allow a private class/function to be accessed from within a test.
+This has the effect that any private classes cannot be tested. This may be ok if
+there is a corresponding public class/function that can exercise the private
+members but this may not always be the case. In these situations it is best to
+move the private class into its own file which does not get exported by the
+top-level export and make it public. The tests can now access your private
+members.
 
 ```
 /// BAD - this code does not make _Taco directly testable
@@ -310,7 +471,11 @@
 
 ## Design Patterns
 ### Disallowing Subclassing
-It can be useful for a library to declare a common base class without allowing developers to extend the common base class. The common pattern for supporting this is to declare a private constructor on your public base class. This has the effect of allowing subclasses within the same file to extend the base class while not allowing users of your library to subclass the base class. 
+It can be useful for a library to declare a common base class without allowing
+developers to extend the common base class. The common pattern for supporting
+this is to declare a private constructor on your public base class. This has the
+effect of allowing subclasses within the same file to extend the base class
+while not allowing users of your library to subclass the base class.
 
 ```
 /// Base class
@@ -333,13 +498,24 @@
 }
 ```
 
-It is important to note that this pattern does not restrict users from subclassing the child class since it has a public constructor. If this restriction is required see the factory constructors pattern below. 
+It is important to note that this pattern does not restrict users from
+subclassing the child class since it has a public constructor. If this
+restriction is required see the factory constructors pattern below.
 
-This pattern is useful if the implementation surface is small since the pattern requires all of the subclasses to live in the same file as the base class or to use the part of directive which is discouraged. If the surface area is too large for a single file consider an alternate pattern.
+This pattern is useful if the implementation surface is small since the pattern
+requires all of the subclasses to live in the same file as the base class or to
+use the `part of` directive, which is discouraged. If the surface area is too
+large for a single file, consider an alternate pattern.
 
 
-### Factory Constructors 
-There are times when a user only needs to interact with a single interface but which may have a different implementation depending on how the object was constructed. Requiring the user to know about the different implementations can add extra API which is not needed and only serves to confuse the user. In this situation you can define an abstract base class which defines the API surface and create factory constructors which vends the appropriate private class.
+### Factory Constructors
+
+There are times when a user only needs to interact with a single interface but
+which may have a different implementation depending on how the object was
+constructed. Requiring the user to know about the different implementations can
+add extra API which is not needed and only serves to confuse the user. In this
+situation you can define an abstract base class which defines the API surface
+and create factory constructors which vend the appropriate private class.
 
 ```
 // Publicly exported class
@@ -373,10 +549,17 @@
 }
 ```
 
-Note: If you need to add the restriction that the base class cannot be extended you can implement the pattern defined in Disallowing Subclassing which adds a private constructor to the public base class
+Note: If you need to add the restriction that the base class cannot be extended,
+you can implement the pattern defined in Disallowing Subclassing, which adds a
+private constructor to the public base class.
 
 ### Working with FIDLs
-Try to make a clear distinction between regular object types and FIDL types. This makes it easier for the maintainers of the code to identify FIDL types from other types and take the necessary precautions when needed. Consider using the as when importing a FIDL service and prefixing it with “fidl_”, this makes it very to identify FIDL types across the entire file. 
+
+Try to make a clear distinction between regular object types and FIDL types.
+This makes it easier for the maintainers of the code to identify FIDL types from
+other types and take the necessary precautions when needed. Consider using the
+`as` keyword when importing a FIDL service and prefixing it with “fidl_”; this
+makes it easy to identify FIDL types across the entire file.
 
 ```
 import 'package:fidl_fuchsia_foo/fidl_async.dart' as fidl_foo;
@@ -385,10 +568,19 @@
 fidl_foo.Bar myMethod(String baz) {...} 
 ```
 
-When subclassing FIDL types extend them so they can be interchanged with the generated FIDL files. Usually, wrappers decorate the existing type with additional functionality that compliments the original object. However, by extending it from the original FIDL it allows the existing and new API to work with original FIDL types instead of the more concrete types which is useful when interacting with other FIDLs or when developers are not using your wrapper.
+When subclassing FIDL types, extend them so they can be interchanged with the
+generated FIDL types. Usually, wrappers decorate the existing type with
+additional functionality that complements the original object. However,
+extending the original FIDL type allows the existing and new API to work with
+original FIDL types instead of the more concrete types, which is useful when
+interacting with other FIDLs or when developers are not using your wrapper.
 
 ### Decoupling implementation concerns
-Try to avoid interfaces which cover multiple areas of concerns. By breaking down the concerns users can have more flexibility with how they choose to combine the interfaces and allows composed objects to be passed to methods with specific concerns.
+
+Try to avoid interfaces which cover multiple areas of concern. Breaking down
+the concerns gives users more flexibility in how they choose to combine the
+interfaces, and allows composed objects to be passed to methods with specific
+concerns.
 
 ```
 void main() {
@@ -425,7 +617,11 @@
 ```
 
 ### Iteration of Modifiable Collections
-When exposing an API that can modify some sort of collection it is important to protect against modifying the collection during iteration. When iterating over an internal collection consider making a copy of the backing collection to iterate. This will protect from exceptions being thrown for concurrent modification of the underlying collection.
+When exposing an API that can modify some sort of collection it is important to
+protect against modifying the collection during iteration. When iterating over
+an internal collection consider making a copy of the backing collection to
+iterate. This protects against exceptions being thrown for concurrent
+modification of the underlying collection.
 
 ```
 class Controller {
@@ -444,22 +640,51 @@
 ```
 
 ## Anti Patterns
-The following patterns should be avoided when writing Dart libraries for the Fuchsia Dart SDK.
-Exposing Internal Details for Testing
-It may be tempting to expose certain aspects of your API for testing concerns. However, doing so can clutter your public interface and leak implementation details which the user does not need to know about or may come to rely on. See the [Testing](#Testing) section for more details
+
+The following patterns should be avoided when writing Dart libraries for the
+Fuchsia Dart SDK.
+
+### Exposing Internal Details for Testing
+
+It may be tempting to expose certain aspects of your API for testing concerns.
+However, doing so can clutter your public interface and leak implementation
+details which the user does not need to know about or may come to rely on. See
+the [Testing](#Testing) section for more details.
 
 ### Accepting/Returning dynamic Types
-Dart provides a dynamic type which the compiler will allow any type to be passed to a function and returned from a function. This can be useful in some situations like json encoding/decoding but in the general case it should be avoided. Using dynamic types prevents the compiler from performing static type checking at compile time and introduces hard to debug run-time errors. 
 
-In situations where an API might need to accept/return multiple input types consider using generics or defining an interface which the object implements instead. In situations where this will not work, consider defining multiple methods which call through to the private dynamic accepting function.
+Dart provides a dynamic type, which allows any type to be passed to and
+returned from a function. This can be useful in some situations, like JSON
+encoding/decoding, but in the general case it should be avoided. Using dynamic
+types prevents the compiler from performing static type checking at compile
+time and introduces hard-to-debug run-time errors.
+
+In situations where an API might need to accept/return multiple input types
+consider using generics or defining an interface which the object implements
+instead. In situations where this will not work, consider defining multiple
+methods which call through to the private dynamic accepting function.
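+
+As a minimal sketch (function names are hypothetical), a generic preserves the
+static typing that `dynamic` would discard:
+
+```
+// BAD - dynamic defeats static type checking.
+// dynamic firstOrDefault(dynamic items, dynamic defaultValue) { ... }
+
+// BETTER - a generic keeps the element type visible to the compiler.
+T firstOrDefault<T>(List<T> items, T defaultValue) {
+  return items.isEmpty ? defaultValue : items.first;
+}
+```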
 
 ### Using Private Methods Across Files
-Dart distinguishes private members from public members by prefixing them with the underscore. This creates isolation between files reduces coupling. This can be overridden by using the `part of` directive at the top of a file. This directive has the effect of combining multiple files and allowing them to access each others private members. Doing this makes it hard to rationalize about what is public and what is private and creates tight coupling between classes. Rather than using this directive, it is recommended to only interact with another object via its public interfaces. If classes must interact via private interfaces it is recommended to keep them in the same file to clearly indicate their relationship.
+
+Dart distinguishes private members from public members by prefixing them with
+an underscore. This creates isolation between files and reduces coupling. This
+can be overridden by using the `part of` directive at the top of a file. This
+directive has the effect of combining multiple files and allowing them to access
+each other’s private members. Doing this makes it hard to reason about what is
+public and what is private, and creates tight coupling between classes. Rather
+than using this directive, it is recommended to only interact with another
+object via its public interfaces. If classes must interact via private
+interfaces it is recommended to keep them in the same file to clearly indicate
+their relationship.
 
 ### Global Static Variables
-Global static variables can be useful in sharing state across a library but they can easily introduce race conditions and hard to debug code. Global variables can also be accessed by users of your library which may introduce unexpected side effects. It is strongly recommended that you avoid global static variables in public libraries. 
 
-If there is a reason that your package does need to use a global static variable it is recommended to use zone-local static variables instead to isolate the variable from users of your library.
+Global static variables can be useful for sharing state across a library, but
+they can easily introduce race conditions and hard-to-debug code. Global variables
+can also be accessed by users of your library which may introduce unexpected
+side effects. It is strongly recommended that you avoid global static variables
+in public libraries.
+
+If there is a reason that your package does need to use a global static variable
+it is recommended to use zone-local static variables instead to isolate the
+variable from users of your library.
 
 ```
 void startComputation() {
diff --git a/docs/concepts/booting/program_loading.md b/docs/concepts/booting/program_loading.md
index 25285cd..37946de 100644
--- a/docs/concepts/booting/program_loading.md
+++ b/docs/concepts/booting/program_loading.md
@@ -20,7 +20,7 @@
 provided by the kernel in ELF format and uses the C/C++ calling conventions
 common to ELF-based systems.
 
-Userspace code (given the appropriate capabilities) can use [system calls] to 
+Userspace code (given the appropriate capabilities) can use [system calls] to
 directly create processes and load programs without
 using ELF, but Zircon's standard ABI for machine code uses ELF as described here.
 
diff --git a/docs/concepts/booting/why_fuchsia_devices_reboot.md b/docs/concepts/booting/why_fuchsia_devices_reboot.md
index 2e0fb72..64a002a 100644
--- a/docs/concepts/booting/why_fuchsia_devices_reboot.md
+++ b/docs/concepts/booting/why_fuchsia_devices_reboot.md
@@ -77,6 +77,11 @@
 packages, that cannot be updated ephemerally. These packages are canonically
 know as base packages.
 
+### Retry system update
+
+A component responsible for system updates fails to apply an update, so the device
+reboots to try again (or possibly revert the update).
+
 ### High temperature
 
 A component responsible for power management detects that a device's temperature
@@ -135,6 +140,7 @@
 Brief power loss             | `BRIEF POWER LOSS`            | `BriefPowerLoss`           | `fuchsia-brief-power-loss`
 User request                 | `USER_REQUEST`                | `UserRequest`              | N/A\*
 System update                | `SYSTEM_UPDATE`               | `SystemUpdate`             | N/A\*
+Retry system update          | `RETRY_SYSTEM_UPDATE`         | `RetrySystemUpdate`        | `fuchsia-retry-system-update`
 High temperature             | `HIGH_TEMPERATURE`            | `HighTemperature`          | N/A\*
 Session failure              | `SESSION_FAILURE`             | `SessionFailure`           | `fuchsia-session-failure`
 Sysmgr failure               | `SYSMGR_FAILURE`              | `SysmgrFailure`            | `fuchsia-sysmgr-failure`
diff --git a/docs/concepts/build_system/fuchsia_build_system_overview.md b/docs/concepts/build_system/fuchsia_build_system_overview.md
index 41d4e9b..cb4961d 100644
--- a/docs/concepts/build_system/fuchsia_build_system_overview.md
+++ b/docs/concepts/build_system/fuchsia_build_system_overview.md
@@ -105,7 +105,7 @@
 
 ## Rebuilding
 
-In order to rebuild the tree after modifying some sources, just rerun 
+In order to rebuild the tree after modifying some sources, just rerun
 **Build step**. This holds true even if you modify `BUILD.gn` files as GN adds
 Ninja targets to update Ninja targets if build files are changed! The same
 holds true for other files used to configure the build. Any change of source
diff --git a/docs/concepts/components/_toc.yaml b/docs/concepts/components/_toc.yaml
index 849985f..77fc0a9 100644
--- a/docs/concepts/components/_toc.yaml
+++ b/docs/concepts/components/_toc.yaml
@@ -28,8 +28,6 @@
     path: /docs/concepts/components/v2/topology.md
   - title: "Monikers"
     path: /docs/concepts/components/v2/monikers.md
-  - title: "The difference between components and processes"
-    path: /docs/concepts/components/v2/components_and_processes.md
   - title: "Realms"
     path: /docs/concepts/components/v2/realms.md
   - title: "Environments"
@@ -59,5 +57,7 @@
         path: /docs/concepts/components/v2/design_principles.md
       - title: "Life of a protocol open"
         path: /docs/concepts/components/v2/life_of_a_protocol_open.md
+  - title: "Components vs. processes"
+    path: /docs/concepts/components/v2/components_vs_processes.md
 - title: "Component URLs"
   path: /docs/concepts/components/component_urls.md
diff --git a/docs/concepts/components/v1/component_manifests.md b/docs/concepts/components/v1/component_manifests.md
index dd063cc..6c068ab 100644
--- a/docs/concepts/components/v1/component_manifests.md
+++ b/docs/concepts/components/v1/component_manifests.md
@@ -22,8 +22,8 @@
     "sandbox": {
         "system": [ "data/sysmgr" ],
         "services": [
-            "fuchsia.sys.Launcher",
-            "fuchsia.netstack.Netstack"
+            "fuchsia.posix.socket.Provider",
+            "fuchsia.sys.Launcher"
         ]
     }
 }
@@ -216,7 +216,7 @@
 The `services` array defines a list of services from `/svc` that the
 component may access. A typical component will require a number services from
 `/svc` in order to play some useful role in the system. For example, if
-`"services" = [ "fuchsia.sys.Launcher", "fuchsia.netstack.Netstack" ]`, the
+`"services" = [ "fuchsia.posix.socket.Provider", "fuchsia.sys.Launcher" ]`, the
 component will have the ability to launch other components and access network
 services. A component may declare any list of services in its `services`,
 but it will only be able to access services present in its
diff --git a/docs/concepts/components/v2/README.md b/docs/concepts/components/v2/README.md
index 2a1286a..08599cb 100644
--- a/docs/concepts/components/v2/README.md
+++ b/docs/concepts/components/v2/README.md
@@ -23,8 +23,6 @@
 - [Realms](realms.md): Sub-trees of the component instance topology.
 - [Monikers](monikers.md): Identifiers for component instances based on
   the component topology.
-- [The difference between components and processes](components_and_processes.md):
-  The relationship between components and processes.
 
 ## Developing components
 
@@ -54,9 +52,10 @@
 - [Life of a protocol open](life_of_a_protocol_open.md): How components connect
   to protocols in their namespaces.
 
-# Components (either version)
+## Components (either version)
 
-- [Component URLs][doc-component_urls] are URLs that identify components.
+- [Component URLs][doc-component-urls] are URLs that identify components.
+- [Components vs. processes](components_vs_processes.md): how the concepts differ.
 
 [doc-component-urls]: /docs/concepts/components/component_urls.md
 [glossary-components-v1]: /docs/glossary.md#components-v1
diff --git a/docs/concepts/components/v2/component_manifests.md b/docs/concepts/components/v2/component_manifests.md
index 58d92e2..e287ccb 100644
--- a/docs/concepts/components/v2/component_manifests.md
+++ b/docs/concepts/components/v2/component_manifests.md
@@ -508,7 +508,7 @@
     {
         directory: "themes",
         path: "/data/themes",
-        rights: [ "r* ]',
+        rights: [ "r*" ],
     },
     {
         storage: "persistent",
diff --git a/docs/concepts/components/v2/components_and_processes.md b/docs/concepts/components/v2/components_and_processes.md
deleted file mode 100644
index de7003b..0000000
--- a/docs/concepts/components/v2/components_and_processes.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# The difference between components and processes
-
-This document explains how components relate to jobs, processes, and threads.
-
-There is no inherent relationship between [component instances](introduction.md)
-and a Zircon task (job, process, or thread). It's best to avoid a mental model
-with a fixed relationship between components and Zircon tasks.
-
-## No direct relationship
-
-There's no inherent relationship between Zircon tasks and components.
-
-Note: Compare with section [Dynamic relationships](#dynamic-relationships).
-
-To illustrate that there is no inherent relationship, consider that a component
-may:
-
-- Have zero or more jobs.
-- Have zero or more processes.
-- Have zero or more threads.
-- Share a job with other components.
-- Share a process with other components.
-- Share a thread with other components.
-
-Different components are expressed, or implemented, differently (even in ways we
-haven't yet explored).
-
-## Dynamic relationships {#dynamic-relationships}
-
-The way components and Zircon tasks relate is dynamic. On initial inspection it
-may appear that there is a hierarchy, but there is no hierarchy between
-components and processes.
-
-## Examples
-
-Here are some examples of specific component types to illustrate the nature of
-the component abstraction:
-
-- Dart Runner
-    - The Dart runner, which itself is a component, is a single process that
-      runs separate Dart components in separate threads.
-- ELF binaries
-    - The [ELF runner](elf_runner.md), which itself is a component, starts a
-      process to kick off the component and then lets that process spawn
-      additional processes as part of the same component.
-- Web page components (using Web Runner)
-    - The Web Runner uses multiple processes in a single component.
diff --git a/docs/concepts/components/v2/components_vs_processes.md b/docs/concepts/components/v2/components_vs_processes.md
new file mode 100644
index 0000000..bfe126e
--- /dev/null
+++ b/docs/concepts/components/v2/components_vs_processes.md
@@ -0,0 +1,32 @@
+# Components vs. processes
+
+This document explains how the concept of components differs from processes and
+related concepts.
+
+The Zircon kernel defines [processes][process] and other [task objects][tasks]
+that are common in modern operating systems. The abstraction of
+[component instances][topology.md#component-instances] sometimes correlates
+with Zircon task abstractions, but not always.
+
+## Examples
+
+The relationship between components and Zircon tasks varies, and is often defined
+by [component runners](runners.md), which implement strategies for launching
+component instances.
+
+-   [ELF Runner](elf_runner.md) launches components by creating a new
+    [job][job] that contains a process that's created from a given executable
+    file in ELF format.
+-   Dart Runner launches a new Dart isolate in a Dart Virtual Machine. A Dart
+    VM is implemented as a process that can host one or more Dart isolate.
+    Dart isolates execute on [threads][thread], but don't necessarily have an
+    assigned thread (this is a VM implementation detail).
+-   Web runner can launch one or more web pages as components, and host them
+    the same web engine container or in separate containers per its isolation
+    policy. Web pages are typically isolated by being hosted in separate
+    processes.
+
+[job]: /docs/reference/kernel_objects/job.md
+[process]: /docs/reference/kernel_objects/process.md
+[thread]: /docs/reference/kernel_objects/thread.md
+[tasks]: /docs/reference/kernel_objects/objects.md#tasks
\ No newline at end of file
diff --git a/docs/concepts/components/v2/images/monikers_absolute.png b/docs/concepts/components/v2/images/monikers_absolute.png
new file mode 100644
index 0000000..a50f3a1
--- /dev/null
+++ b/docs/concepts/components/v2/images/monikers_absolute.png
Binary files differ
diff --git a/docs/concepts/components/v2/images/monikers_child.png b/docs/concepts/components/v2/images/monikers_child.png
new file mode 100644
index 0000000..83de895
--- /dev/null
+++ b/docs/concepts/components/v2/images/monikers_child.png
Binary files differ
diff --git a/docs/concepts/components/v2/images/monikers_relative.png b/docs/concepts/components/v2/images/monikers_relative.png
new file mode 100644
index 0000000..1512977
--- /dev/null
+++ b/docs/concepts/components/v2/images/monikers_relative.png
Binary files differ
diff --git a/docs/concepts/components/v2/introduction.md b/docs/concepts/components/v2/introduction.md
index 21e206c..2b0e4c4 100644
--- a/docs/concepts/components/v2/introduction.md
+++ b/docs/concepts/components/v2/introduction.md
@@ -97,7 +97,6 @@
 
 Almost all software that runs on Fuchsia is a component, including:
 
--   Command-line tools
 -   Device drivers
 -   End-user applications
 -   Filesystems
diff --git a/docs/concepts/components/v2/monikers.md b/docs/concepts/components/v2/monikers.md
index 286714b..27387cb 100644
--- a/docs/concepts/components/v2/monikers.md
+++ b/docs/concepts/components/v2/monikers.md
@@ -2,8 +2,9 @@
 
 <<../_v2_banner.md>>
 
-A moniker identifies a specific component instance in the component tree
-using a topological path.
+
+A moniker identifies a specific component instance in the component tree using
+a topological path.
 
 Note: Use [component URLs][doc-component-urls] to identify the location from
 which the component's manifest and assets are retrieved; use monikers to
@@ -15,9 +16,9 @@
 
 - Child moniker: Denotes a child of a component instance relative to its parent.
 - Relative moniker: Denotes the path from a source component instance to a
-  target component instance, expressed as a sequence of child monikers.
+  target component instance. It is expressed as a sequence of child monikers.
 - Absolute moniker: Denotes the path from the root of the component instance
-  tree to a target component instance, expressed as a sequence of child
+  tree to a target component instance. It is expressed as a sequence of child
   monikers. Every component instance has a unique absolute moniker.
 
 ## Stability
@@ -54,8 +55,6 @@
 Monikers may be collected in system logs. They are also used to implement the
 component framework's persistence features.
 
-TODO: Describe obfuscation strategy.
-
 ## Notation
 
 This section describes the syntax used for displaying monikers to users.
@@ -72,8 +71,8 @@
 
 ### Instance Identifiers
 
-Instance identifiers ensure uniqueness of monikers over time whenever a parent
-destroys a component instance and creates a new one with the same name.
+Instance identifiers ensure the uniqueness of monikers over time whenever a
+parent destroys a component instance and creates a new one with the same name.
 
 Syntax: Decimal formatted 32-bit unsigned integer using characters: `0-9`.
 
@@ -84,12 +83,15 @@
 
 Syntax: `{name}:{id}` or `{collection}:{name}:{id}`
 
+The following diagram shows an example component topology,
+with the children of `alice` labeled with their child monikers.
+
+<br>![Diagram of Child Monikers](images/monikers_child.png)<br>
+
 Examples:
 
-- `truck:2`: child "truck" (instance id 2)
-- `animals:bear:1`: child "bear" (instance id 1) in collection "animals"
-
-TODO: Add a diagram to go along with the examples.
+- `carol:0`: child "carol" (instance id 0)
+- `support:dan:1`: child "dan" (instance id 1) in collection "support"
 
 ### Relative Monikers
 
@@ -102,25 +104,41 @@
 is no trailing `\` or `/`.
 
 Relative monikers are invertible; a path from source to target can be
-transformed into a path from target to source because information about
-both paths is fully encoded by the representation.
+transformed into a path from target to source because information about
+both endpoints is fully encoded by the representation.
 
 In contrast, file system paths are not invertible because they use `..`
 to denote upwards traversal so some inverse traversal information is missing.
 
+To maintain invertibility, the syntax for denoting paths varies slightly
+for upward and downward traversals. A downward path segment is the child
+moniker of one of the current component instance's children: `./carol:2`.
+Conversely, an upward path segment *is* the child moniker of the current
+component instance, as assigned by its parent: `.\alice:2/bob:0`. The child
+moniker is spelled out in the upward path (instead of a generic "upward
+traversal" marker like `..`) because otherwise the relative moniker would not
+be invertible and would not uniquely identify a component instance. For
+downward traversals, the path doesn't need to include the parent's name to be
+traceable because a child has only *one* parent. However, for upward
+traversals the path must name which of the parent's many children the
+traversal starts from.
+
 Syntax: `.\{path from source to ancestor}/{path from ancestor to target}`
 
+The following diagram shows an example component topology, with all relative
+monikers that can be derived from the source component `alice` labeled. Note
+that `support` is not a component but rather a collection with two
+children: `dan` and `jan`.
+
+<br>![Diagram of Relative Monikers](images/monikers_relative.png)<br>
+
 Examples:
 
 - `.`: self - no traversal needed
-- `./truck:2`: a child - traverse down `truck:2`
-- `./truck:2/axle:1`: a grandchild - traverse down `truck:2` then down `axle:1`
-- `.\truck:2/animals:bear:1`: a cousin - traverse up `truck:2` then down
-  `animals:bear:1`
-- `.\animals:bear:1/truck:2`: a cousin - inverse of the prior example,
-  constructed by reversing the segments of the traversal
-
-TODO: Add a diagram to go along with the examples.
+- `./carol:2`: a child - traverse down `carol:2`
+- `./carol:2/sandy:1`: a grandchild - traverse down `carol:2` then down `sandy:1`
+- `.\alice:2/bob:0`: a cousin - traverse up `alice:2` then down `bob:0`
+- `./support:dan:1`: a child - traverse down into collection child `support:dan:1`
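+- `.\bob:0/alice:2`: the inverse of the cousin example `.\alice:2/bob:0`,
+  constructed by reversing the segments of the traversal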
 
 ### Absolute Monikers
 
@@ -132,13 +150,21 @@
 
 Syntax: `/{path from root to target}`
 
+The following diagram shows an example component topology, with all absolute
+monikers that can be derived from the unnamed root component labeled. The root
+component is unnamed because it is inherently not the child of any other
+component and components are named by their parents, not by components
+themselves. Note that `support` is not a component but rather a collection with
+two children: `dan` and `jan`.
+
+<br>![Diagram of Absolute Monikers](images/monikers_absolute.png)<br>
+
 Examples:
 
 - `/`: the root itself (it has no name because it has no parent)
-- `/objects:2/animals:deer:1`: from root traverse down `objects:2` then down
-  `animals:deer:1`
+- `/alice:2/support:dan:1`: from root traverse down `alice:2` then down `support:dan:1`
+- `/alice:2/carol:1`: from root traverse down `alice:2` then down `carol:1`
 
-TODO: Add a diagram to go along with the examples.
 
 [doc-manifests]: component_manifests.md
 [doc-component-urls]: introduction.md#component-urls
diff --git a/docs/concepts/components/v2/services.md b/docs/concepts/components/v2/services.md
index a725652..f4bbb5d 100644
--- a/docs/concepts/components/v2/services.md
+++ b/docs/concepts/components/v2/services.md
@@ -13,8 +13,8 @@
 A component can also access multiple instances in its incoming namespace.
 These are presented in the incoming namespace as subdirectories of the service.
 
-For example, the Netstack service with instance `default` would be accessible
-at the path `/svc/fuchsia.netstack.Netstack/default`.
+For example, the Launcher service with instance `default` would be accessible
+at the path `/svc/fuchsia.sys.Launcher/default`.
 
 ## Protocols
 
@@ -22,19 +22,19 @@
 Logically-related protocols can be aggregated into a service and routed as a
 single unit.
 
-An example of a FIDL service definition:
+An example of a FIDL service definition (defined in the `fuchsia.network` library):
 
 ```fidl
-service Netstack {
+service Provider {
     fuchsia.net.NameLookup name_lookup;
     fuchsia.posix.socket.Provider socket_provider;
 }
 ```
 
 Each protocol has a name and is accessible as a subdirectory of the service
-instance. For example, the `socket_provider` protocol of the Netstack service
-instance `default` is accessible at the path
-`/svc/fuchsia.netstack.Netstack/default/socket_provider`.
+instance. For example, the `socket_provider` protocol of the
+`fuchsia.network.Provider` service instance `default` is accessible at the path
+`/svc/fuchsia.network.Provider/default/socket_provider`.
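+
+For example, a client that already knows the instance and protocol names can
+connect by opening that path directly. The following is a minimal sketch that
+assumes the low-level C `fdio` API and a hypothetical helper name; components
+may instead use their language's generated FIDL bindings:
+
+```cpp
+#include <lib/fdio/directory.h>
+#include <lib/zx/channel.h>
+
+// Hypothetical helper: returns a channel speaking
+// fuchsia.posix.socket.Provider, or an invalid channel on failure.
+zx::channel ConnectToSocketProvider() {
+  zx::channel client, server;
+  zx::channel::create(0, &client, &server);
+  // Open the socket_provider protocol of the "default" service instance.
+  zx_status_t status = fdio_service_connect(
+      "/svc/fuchsia.network.Provider/default/socket_provider",
+      server.release());
+  if (status != ZX_OK) {
+    return zx::channel();
+  }
+  return client;
+}
+```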
 
 Note: If the instance name and protocol are known ahead of time, it is possible
 to open the protocol directly with zero round-trips.
diff --git a/docs/concepts/diagnostics/_toc.yaml b/docs/concepts/diagnostics/_toc.yaml
index ca0e646..0b6ad67 100644
--- a/docs/concepts/diagnostics/_toc.yaml
+++ b/docs/concepts/diagnostics/_toc.yaml
@@ -35,6 +35,9 @@
   - title: "Overview"
     path: /docs/concepts/diagnostics/logs/README.md
 
+  - title: "Attributing LogSink connections"
+    path: /docs/concepts/diagnostics/logs/attribution.md
+
   # TODO(fxbug.dev/60766)
   # - title: "Principles"
   #   path: /docs/concepts/diagnostics/logs/principles.md
diff --git a/docs/concepts/diagnostics/logs/README.md b/docs/concepts/diagnostics/logs/README.md
index 2875590..e5de00b 100644
--- a/docs/concepts/diagnostics/logs/README.md
+++ b/docs/concepts/diagnostics/logs/README.md
@@ -8,6 +8,9 @@
 
 See [Viewing] for information about how to view the recorded logs.
 
+See [Attributing LogSink connections] for information about how Fuchsia identifies
+the producer of each log message.
+
 ## Contents
 
 [Log records][LogMessage] have a few pieces of metadata, mostly self-reported by
@@ -56,3 +59,4 @@
 [feedback data]: /src/developer/forensics/feedback_data
 [persistent disk store]: /src/developer/forensics/feedback_data/system_log_recorder/system_log_recorder.h
 [`fx snapshot`]: /src/developer/forensics/snapshot/README.md
+[Attributing LogSink connections]: /docs/concepts/diagnostics/logs/attribution.md
diff --git a/docs/reference/diagnostics/logs/attribution.md b/docs/concepts/diagnostics/logs/attribution.md
similarity index 100%
rename from docs/reference/diagnostics/logs/attribution.md
rename to docs/concepts/diagnostics/logs/attribution.md
diff --git a/docs/concepts/drivers/driver-binding.md b/docs/concepts/drivers/driver-binding.md
index 8d662ba..b256935 100644
--- a/docs/concepts/drivers/driver-binding.md
+++ b/docs/concepts/drivers/driver-binding.md
@@ -40,8 +40,9 @@
 macro:
 
 ```
-ZIRCON_DRIVER(Driver, Ops, VendorName, Version)
+ZIRCON_DRIVER(Driver, Ops, VendorName, Version);
 ```
+
  - `Driver` is the name of the driver.
  - `Ops` is a `zx_driver_ops`, which are the driver operation hooks
  - `VendorName` is a string representing the name of the driver vendor.
diff --git a/docs/concepts/drivers/driver-development.md b/docs/concepts/drivers/driver-development.md
index 695125b..9e90af4 100644
--- a/docs/concepts/drivers/driver-development.md
+++ b/docs/concepts/drivers/driver-development.md
@@ -60,8 +60,8 @@
 `bind()` driver op.
 
 Drivers are loaded and bound to a device when the Device Coordinator
-successfully finds a matching driver for a device. A driver declares the devices
-it is compatible with through bind rules, which are should be placed in a
+successfully finds a matching driver for a device. A driver declares the
+devices it is compatible with through bind rules, which should be placed in a
 `.bind` file alongside the driver. The bind compiler compiles those rules and
 creates a driver declaration macro containing those rules in a C header file.
 The following bind program declares the
diff --git a/docs/concepts/drivers/driver-utils.md b/docs/concepts/drivers/driver-utils.md
index e3601d8..5eb3627 100644
--- a/docs/concepts/drivers/driver-utils.md
+++ b/docs/concepts/drivers/driver-utils.md
@@ -87,6 +87,7 @@
 To implement the Fuchsia application that would communicate with the device,
 call into the FIDL API. For this utilize the FIDL bindings for your language of
 choice, for C++:
+
 * [LLCPP](/docs/reference/fidl/bindings/llcpp-bindings.md).
 * [HLCPP](/docs/reference/fidl/bindings/hlcpp-bindings.md).
 
diff --git a/docs/concepts/drivers/driver_interfaces/audio_streaming.md b/docs/concepts/drivers/driver_interfaces/audio_streaming.md
index 9df8381..9b0f093 100644
--- a/docs/concepts/drivers/driver_interfaces/audio_streaming.md
+++ b/docs/concepts/drivers/driver_interfaces/audio_streaming.md
@@ -18,6 +18,7 @@
 outputs streams is exclusive to the application owner of the stream. Mixing of
 audio is not a service provided by the audio stream interface.
 
+{% comment %}
 > TODO(fxbug.dev/35523):
 > The pre-FIDL serialization still in use as of 2020/02/04
 > is defined in [audio_streaming_original.md](audio_streaming_original.md),
@@ -26,6 +27,7 @@
 
 > TODO: extend this interface to support the concept of low-latency hardware
 > mixers.
+{% endcomment %}
 
 ### Definitions
 
@@ -62,9 +64,11 @@
 :                               : these interfaces to communicate with an      :
 :                               : audio driver/device.                         :
 
+{% comment %}
 > TODO: do we need to extend this interface to support non-linear audio sample
 > encodings? This may be important for telephony oriented microphones which
 > deliver &mu;-law encoded samples.
+{% endcomment %}
 
 ### Basic Operation
 
@@ -83,9 +87,11 @@
 *   Plug detection notification
 *   Access control capability detection and signalling
 
+{% comment %}
 > TODO: Should plug/unplug detection be done by sending notifications over the
 > stream channel (as it is today), or by publishing/unpublishing the device
 > nodes (and closing all channels in the case of unpublished channels)?
+{% endcomment %}
 
 In order to actually send or receive audio information on the stream, the
 specific format to be used must first be set. The response to a successful
@@ -225,7 +231,7 @@
 *   When encoding a smaller sample size in a larger channel (e.g. 20 or 24bit in
     32), the most significant bits of the 32 bit container are used while the
     least significant bits will be ignored (left justified). e.g. a 20 bit sample would be mapped
-    onto the range [12,31] (bits [0,11] would be ignored) of the 32 bit container.
+    onto the range \[12,31\] (bits \[0,11\] would be ignored) of the 32 bit container.
 
 ### Setting the desired stream format
 
@@ -259,7 +265,7 @@
 or the USB Audio specifications, or reported by down stream devices using
 mechanisms such as EDID when using HDMI or DisplayPort interconnects.
 
-## Hardware Gain Control
+## Hardware gain control
 
 ### Hardware gain control capability reporting
 
@@ -317,12 +323,12 @@
 headphones may publish a new output stream when connected to USB, but choose to
 be "hardwired" from a plug detection standpoint. A different USB audio adapter
 with a standard 3.5mm phono jack might publish an output stream when connected
-via USB, but choose to change its plugged/unplugged state as the user plugs and
-unplugs an analog device via the 3.5mm jack.
+through USB, but choose to change its plugged/unplugged state as the user plugs
+and unplugs an analog device using the 3.5mm jack.
 
 The ability to query the currently plugged or unplugged state of a stream, and
 to register for asynchonous notifications of plug state changes (if supported)
-is handled via plug detection messages.
+is handled through plug detection messages.
 
 ### Plug detect capabilities
 
@@ -360,14 +366,16 @@
 
 ## Stream purpose and association
 
+{% comment %}
 > TODO: specify how drivers can indicate the general "purpose" of an audio
 > stream in the system (if known), as well as its relationship to other streams
 > (if known). For example, an embedded target like a phone or a tablet needs to
 > indicate which output stream is the built-in speaker vs. which is the headset
 > jack output. In addition, it needs to make clear which input stream is the
 > microphone associated with the headset output vs. the builtin speaker.
+{% endcomment %}
 
-## Ring-Buffer Channels
+## Ring-Buffer channels
 
 ### Overview
 
@@ -435,7 +443,7 @@
 buffer. This is done by sending an `CreateRingBuffer` request over the
 ring-buffer channel. This may only be done while the ring-buffer is stopped.
 
-If the channel created via `CreateRingBuffer` is closed by the driver for instance
+If the channel created with `CreateRingBuffer` is closed by the driver, for instance
 because a buffer has already been established and the ring-buffer has already
 been started, it must not either stop the ring-buffer, or discard the
 existing shared memory. If the application requests a new buffer after having
@@ -455,12 +463,14 @@
 must ensure that the size of the ring buffer is an integral number of audio
 frames.
 
+{% comment %}
 > TODO : Is it reasonable to require that drivers produce buffers which are an
 > integral number of audio frames in length? It certainly makes the audio
 > client's life easier (client code never needs to split or re-assemble a frame
 > before processing), but it might make it difficult for some audio hardware to
 > meet its requirements without making the buffer significantly larger than the
 > client asked for.
+{% endcomment %}
 
 #### `clock_recovery_notifications_per_ring`
 
@@ -498,7 +508,7 @@
 Upon successfully starting a stream, drivers must provide their best estimate of
 the time at which their hardware began to transmit or capture the stream in the
 `start_time` field of the response. This time stamp must be taken from the clock
-exposed via the
+exposed with the
 [zx_clock_get_monotonic()](/docs/reference/syscalls/clock_get_monotonic.md)
 syscall. Along with the FIFO depth property of the ring buffer, this timestamp
 allows applications to send or receive stream data without the need for periodic
@@ -510,15 +520,17 @@
 [monotonic](/docs/reference/syscalls/clock_get_monotonic.md) timelines across
 the cohort of synchronized devices).
 
+{% comment %}
 > TODO: Redefine `start_time` to allow it to be an arbitrary 'audio stream
 > clock' instead of the `zx_clock_get_monotonic()` clock. If the stream clock is
 > made to count in audio frames since start, then this `start_time` can be
 > replaced with the terms for a segment of a piecewise linear transformation
-> which can be subsequently updated via notifications sent by the driver in the
+> which can be subsequently updated through notifications sent by the driver in the
 > case that the audio hardware clock is rooted in a different oscillator from
 > the system's tick counter. Clients can then use this transformation either to
 > control the rate of consumption of input streams, or to determine where to
 > sample in the input stream to effect clock correction.
+{% endcomment %}
 
 Upon successfully starting a stream, drivers must guarantee that no position
 notifications will be sent before the start response has been enqueued into the
@@ -530,7 +542,7 @@
 
 ### Position notifications
 
-If requested by the client via a non-zero `clock_recovery_notifications_per_ring` in the
+If requested by the client through a non-zero `clock_recovery_notifications_per_ring` in the
 `CreateRingBuffer` operation, the driver will
 periodically send updates to the client informing it of its current production
 or consumption position in the buffer. This position is expressed in bytes in
@@ -574,6 +586,7 @@
 information (in addition to `AUDIO_RB_POSITION_NOTIFY` messages) to simplify the
 process of recovering the audio device's clock.
 
+{% comment %}
 > TODO: extend this section to include how clock recovery occurs, and how this
 > is exposed to clients. Also, detail how slewable oscillators are discovered
 > and controlled. We may need rate-change notifications to clients of slewable
@@ -588,10 +601,13 @@
 > wide clock identifier and provide the ability to obtain a channel on which
 > clock recovery notifications can be delivered to clients and hardware slewing
 > command can be sent from clients to the clock.
+{% endcomment %}
 
 ### Error notifications
 
-> TODO: define these and what driver behavior should be, if/when they occur.
+{% comment %}
+TODO: define these and what driver behavior should be, if/when they occur.
+{% endcomment %}
 
 ### Unexpected client termination
 
diff --git a/docs/concepts/emulator/index.md b/docs/concepts/emulator/index.md
index ab35c75..fa60813 100644
--- a/docs/concepts/emulator/index.md
+++ b/docs/concepts/emulator/index.md
@@ -4,8 +4,8 @@
 FEMU is included in Fuchsia source, and it’s downloaded by `jiri` as part of `jiri update` or `jiri run-hooks`.
 It’s fetched into the Fuchsia directory `/prebuilt/third_party/aemu`.
 
-You can call FEMU with `fx` using the `fx emu` command, or from the Fuchsia IDK using `femu.sh`.
-
+You can call FEMU with `fx` using `fx emu` (Linux) or `fx vdl` (macOS). Alternatively, 
+you can call FEMU from the Fuchsia IDK using `femu.sh`.
 
 ## FEMU and other emulators {#femu-and-other-emulators}
 
@@ -23,7 +23,8 @@
 FEMU features include:
 
 *   **GUI Support:** You can run Fuchsia with the GUI (by default) or without the GUI
-    (using the `--headless` argument with the [fx emu](https://fuchsia.dev/reference/tools/fx/cmd/emu) command)
+    (using the `--headless` argument with the [fx emu](https://fuchsia.dev/reference/tools/fx/cmd/emu)
+    or [fx vdl](https://fuchsia.dev/reference/tools/fx/cmd/vdl) commands).
 *   **GPU Support:** You can run with the host’s GPU (by default) with full
     [Vulkan](/docs/concepts/graphics/magma/vulkan.md) support, or you can choose
     software rendering using [SwiftShader](https://swiftshader.googlesource.com/SwiftShader/).
@@ -31,8 +32,12 @@
      or from the command line using [fx emu-remote](https://fuchsia.dev/reference/tools/fx/cmd/emu-remote)
      command or `femu.sh` with the Fuchsia IDK.
 
-To configure these features, see the [Running Fuchsia Emulator](/docs/development/run/femu.md)
-page. Additional features are listed in the [fx emu](https://fuchsia.dev/reference/tools/fx/cmd/emu) reference page.
+To configure these features, see the [Set up and start FEMU](/docs/get-started/set_up_femu.md)
+page.
+
+Additional features are listed in the [fx emu](https://fuchsia.dev/reference/tools/fx/cmd/emu)
+and [fx vdl](https://fuchsia.dev/reference/tools/fx/cmd/vdl) reference pages.
+
 If you’re using the Fuchsia IDK, `femu.sh` supports the same flags as `fx emu`.
 
 
@@ -40,7 +45,7 @@
 
 ### FEMU image and board support {#femu-image-and-board-support}
 
-When setting up FEMU using `fx emu`, FEMU only supports the following boards:
+When setting up FEMU using `fx set`, FEMU only supports the following boards:
 
 *   `qemu-x64`
 *   `qemu-arm64`
@@ -51,6 +56,7 @@
 *   `workstation.qemu-x64-release`
 *   `qemu-arm64`
 
+Note: ARM64 support (`qemu-arm64`) is very limited and not recommended.
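+
+For example, a typical build configuration that FEMU can boot (assuming the
+`core` product) is `fx set core.qemu-x64`.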
 
 ### FEMU networking  {#femu-networking}
 
@@ -80,7 +86,6 @@
 
 Then you can use FEMU to do the following:
 
-*   [Set up and configure FEMU](/docs/get-started/set_up_femu.md)
-*   [Run FEMU](/docs/development/run/femu.md)
+*   [Set up and start FEMU](/docs/get-started/set_up_femu.md)
 *   [Test components](/docs/development/run/run-test-component.md)
 *   [Run end-to-end tests](/docs/development/testing/run_an_end_to_end_test.md)
diff --git a/docs/concepts/filesystems/filesystems.md b/docs/concepts/filesystems/filesystems.md
index 336cd9a..b40c443 100644
--- a/docs/concepts/filesystems/filesystems.md
+++ b/docs/concepts/filesystems/filesystems.md
@@ -281,7 +281,7 @@
 partition is using the slice, and the virtual address of the slice within
 that partition.
 
-[Superblock](/zircon/system/ulib/fvm/include/fvm/format.h#27)
+[Superblock](/src/storage/fvm/format.h#27)
 at block zero describe the on-disk layout of the FVM, which may look like
 
 ```c
@@ -326,10 +326,10 @@
    * what logical slice within partition the slice maps to
 
 FVM library can be found
-[here](/zircon/system/ulib/fvm/). During
+[here](/src/storage/fvm/). During
 [paving](/docs/development/hardware/paving.md),
 some partitions are copied from host to target. So the partitions and FVM
 file itself may be created on host. To do this there is host side utility
-[here](/zircon/tools/fvm).
+[here](/src/storage/bin/fvm).
 Integrity of the FVM device/file can be verbosely verified with
 [fvm-check](/src/devices/block/bin/fvm-check)
diff --git a/docs/concepts/graphics/escher/OWNERS b/docs/concepts/graphics/escher/OWNERS
new file mode 100644
index 0000000..0ca93ab6
--- /dev/null
+++ b/docs/concepts/graphics/escher/OWNERS
@@ -0,0 +1,3 @@
+include /src/ui/lib/escher/OWNERS
+
+# COMPONENT: Escher
diff --git a/docs/concepts/graphics/magma/vulkan.md b/docs/concepts/graphics/magma/vulkan.md
index a6e0ea3..c12b4f7 100644
--- a/docs/concepts/graphics/magma/vulkan.md
+++ b/docs/concepts/graphics/magma/vulkan.md
@@ -8,8 +8,33 @@
 [board](/docs/concepts/build_system/boards_and_products.md) that is selected
 when building.
 
-A component that will use Vulkan must include these features and services in its
-.cmx file:
+Include the following in your component manifest to enable access to the Vulkan driver:
+
+```json
+{
+   "include": [
+      "src/lib/vulkan/application.shard.cmx"
+   ],
+   ...
+}
+```
+
+A [test component](/docs/concepts/testing/v1_test_component.md) should instead have
+these lines in its .cmx:
+
+```json
+{
+   "include": [
+      "src/lib/vulkan/test-application.shard.cmx"
+   ],
+   ...
+}
+```
+
+### Out-of-tree runtime dependencies
+
+An application that is not in the Fuchsia tree, or that otherwise can't
+include the file above, must include these features and services in its .cmx
+file:
 
 ```json
 {
@@ -32,7 +57,7 @@
 recommended to allow logs from the client driver to appear in the [system
 log](/docs/development/diagnostics/logs/viewing.md).
 
-A [test component](/docs/concepts/testing/test_component.md) must also have
+A [test component](/docs/concepts/testing/v1_test_component.md) must also have
 these lines in its .cmx:
 
 ```json
diff --git a/docs/concepts/graphics/scenic/OWNERS b/docs/concepts/graphics/scenic/OWNERS
new file mode 100644
index 0000000..15d0220
--- /dev/null
+++ b/docs/concepts/graphics/scenic/OWNERS
@@ -0,0 +1,3 @@
+include /src/ui/scenic/OWNERS
+
+# COMPONENT: Scenic
diff --git a/docs/concepts/index.md b/docs/concepts/index.md
new file mode 100644
index 0000000..25dbb99
--- /dev/null
+++ b/docs/concepts/index.md
@@ -0,0 +1,35 @@
+# Overview
+
+Fuchsia is a new open source operating system created at Google
+that is currently under active development.
+We are building Fuchsia from the kernel up
+to meet the needs of today’s growing ecosystem of connected devices.
+
+Fuchsia is still evolving rapidly,
+but the underlying principles and values of the system
+have remained relatively constant throughout the project.
+The core architectural principles guiding Fuchsia’s design and development are:
+secure, updatable, inclusive, and pragmatic.
+
+## [Secure](/docs/concepts/principles/secure.md)
+
+All software that runs on Fuchsia receives the least privilege it needs
+to perform its job,
+and gains access only to information it needs to know.
+
+## [Updatable](/docs/concepts/principles/updatable.md)
+
+Much like the web,
+software on Fuchsia is designed to come and go as needed,
+and security patches can be pushed to all products on demand.
+
+## [Inclusive](/docs/concepts/principles/inclusive.md)
+
+Fuchsia is an open source project that currently supports a variety
+of languages and runtimes, including C++, Web, Rust, Go, Flutter, and Dart.
+
+## [Pragmatic](/docs/concepts/principles/pragmatic.md)
+
+Fuchsia is not a science experiment,
+it’s a production-grade operating system that must adhere to fundamentals,
+like performance.
diff --git a/docs/concepts/kernel/README.md b/docs/concepts/kernel/README.md
index 35f2902..dade4c1 100644
--- a/docs/concepts/kernel/README.md
+++ b/docs/concepts/kernel/README.md
@@ -1,9 +1,9 @@
 # Zircon
 
-Zircon is the core platform that powers the Fuchsia OS. Zircon is
-composed of a microkernel (source in [/zircon/kernel](/zircon/kernel)
+Zircon is the core platform that powers Fuchsia. Zircon is
+composed of a microkernel (source in [/zircon/kernel](/zircon/kernel))
 as well as a small set of userspace services, drivers, and libraries
-(source in [/zircon/system/](/zircon/system) necessary for the system
+(source in [/zircon/system/](/zircon/system)) necessary for the system
 to boot, talk to hardware, load userspace processes and run them, etc.
 Fuchsia builds a much larger OS on top of this foundation.
 
diff --git a/docs/concepts/modular/guide/how_to_write_a_module_cc.md b/docs/concepts/modular/guide/how_to_write_a_module_cc.md
index 88a7215..9756325 100644
--- a/docs/concepts/modular/guide/how_to_write_a_module_cc.md
+++ b/docs/concepts/modular/guide/how_to_write_a_module_cc.md
@@ -1,19 +1,21 @@
-# How-To: Write a Module in C++
+# Write a module in C++
 
 > DEPRECATION WARNING: The Modular framework is being deprecated in favor of
 > the [Session Framework](/docs/concepts/session/introduction.md).
 
 ## Overview
 
-A `Module` is a UI component that can participate in a [Story](link to story doc),
-potentially composed of many different `Module`s. A `Module`'s lifecycle is tightly
+Note: For more information on Modules, see [Module](/docs/concepts/modular/module.md).
+
+A `Module` is a UI component that can participate in a story, which may be
+composed of many different `Module`s. A `Module`'s lifecycle is tightly
 bound to the story to which it was added. In addition to the capabilities
 provided to all Peridot components via `fuchsia::modular::ComponentContext`, a `Module` is given
-additional capabilities via its `fuchsia::modular::ModuleContext`.
+additional capabilities through its `fuchsia::modular::ModuleContext`.
 
 ## `SimpleMod`
 
-### Mod Initialization
+### Mod initialization
 
 The first step to writing a `Module` is implementing the initializer.
 
@@ -79,4 +81,3 @@
 ```
 
 The module is responsible for calling `done` once its shutdown sequence is complete.
-
diff --git a/docs/concepts/packages/package_url.md b/docs/concepts/packages/package_url.md
index 38826ae..91e148d 100644
--- a/docs/concepts/packages/package_url.md
+++ b/docs/concepts/packages/package_url.md
@@ -98,7 +98,8 @@
 
 A package name consists of a sequence of up to 100 of the following latin-1
 characters in any order: digits (`0` to `9`), lower-case letters (`a` to `z`),
-hyphen (`-`), and period (`.`).  No other characters are permitted.
+hyphen (`-`), underscore (`_`), and period (`.`).
+No other characters are permitted.
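+For example, `http_client-2.0` is a valid package name, while `HttpClient` is
+not, because upper-case letters are not permitted.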
 
 A package's name must be unique among all packages in a repository.
 Conversely, packages within different repositories are considered distinct even
diff --git a/docs/concepts/principles/_toc.yaml b/docs/concepts/principles/_toc.yaml
new file mode 100644
index 0000000..b186292
--- /dev/null
+++ b/docs/concepts/principles/_toc.yaml
@@ -0,0 +1,16 @@
+# Copyright 2020 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Please, read https://fuchsia.dev/fuchsia-src/community/contribute/docs/documentation_navigation_toc
+# before making changes to this file, and add a member of the fuchsia.dev
+# team as reviewer.
+toc:
+- title: "Secure"
+  path: /docs/concepts/principles/secure.md
+- title: "Updatable"
+  path: /docs/concepts/principles/updatable.md
+- title: "Inclusive"
+  path: /docs/concepts/principles/inclusive.md
+- title: "Pragmatic"
+  path: /docs/concepts/principles/pragmatic.md
diff --git a/docs/concepts/principles/inclusive.md b/docs/concepts/principles/inclusive.md
new file mode 100644
index 0000000..e7676ad
--- /dev/null
+++ b/docs/concepts/principles/inclusive.md
@@ -0,0 +1,98 @@
+# Inclusive
+
+Fuchsia is an open source project that is inclusive by design,
+from the architecture of the platform
+to the open source community that we’re building.
+
+Applying the principles of inclusion
+through these dual lenses is a challenge we embrace.
+We have not yet achieved all of our goals,
+but we’re committed to doing the work to uphold this principle
+with the help of our developer community.
+
+## Fuchsia architecture is inclusive by design
+
+### Developers can use their runtime and language of choice
+
+**[Fuchsia Interface Definition Language (FIDL)](/docs/concepts/fidl/overview.md)
+allows diverse clients and services to interoperate**
+
+Fuchsia is highly extensible:
+developers can create components using the language and environment they prefer.
+Both components and FIDL protocols are accessible to any runtime.
+Software from different runtimes can integrate to form a cohesive
+experience. Fuchsia simplifies the development model,
+making nearly all user space software a component,
+from system services to end-user applications.
+
+### Fuchsia is designed to support a wide range of hardware
+
+**[Fuchsia’s Driver Development Kit](/docs/concepts/drivers/overview.md)
+allows for a diverse hardware ecosystem**
+
+Fuchsia aims to have a binary-stable interface for drivers.
+In this approach,
+developers can write drivers once and
+these drivers will continue to work as Fuchsia evolves.
+There’s no need to modify or recompile drivers when there’s an
+update to Fuchsia. This allows for a large hardware ecosystem that
+is scalable and easier to maintain.
+
+### Anyone can build and test Fuchsia
+
+**[Fuchsia's emulator (FEMU)](/docs/get-started/set_up_femu.md)
+makes it easier for most development environments to run Fuchsia**
+
+FEMU allows you to test Fuchsia components and applications
+without needing a Fuchsia device. FEMU looks and behaves like a Fuchsia device,
+with the exception that no paving is required.
+FEMU simulates different processes and environments
+that any developer can use to test and build Fuchsia.
+
+## Open source community
+
+### All developers are welcome to contribute
+
+**[Guidelines and resources](/CONTRIBUTING.md)
+are available to help Fuchsia developers**
+
+Google and the Fuchsia team are committed
+to preserving and fostering a diverse, inclusive, and welcoming community.
+As an open source effort, we welcome high-quality, well-tested contributions
+from all. [Our code of conduct](/CODE_OF_CONDUCT.md)
+is in place to ensure that community discussions are productive and kind.
+
+### Inclusive language is a core value
+
+**[Respectful code practices](/docs/contribute/respectful_code.md)
+reduce harm and bias**
+
+Fuchsia's values include treating each other with dignity.
+It’s important that everyone can contribute
+without facing the harmful effects of bias and discrimination.
+Our respectful code guidelines aim to eliminate terms
+that perpetuate discrimination in our codebase, user
+interfaces, and documentation.
+
+### Communication channels are open
+
+**[Our bug tracking system](/docs/contribute/report-issue.md)
+and [mailing lists](/docs/contribute/community/get-involved.md)
+are public**
+
+The open source community can stay informed about Fuchsia updates and progress
+by joining our mailing lists.
+Fuchsia invites developers to contribute and report issues through our
+bug tracking system.
+The Fuchsia project uses Gerrit's web-based UI to manage code and
+documentation reviews.
+
+### Our roadmap is public
+
+**Fuchsia is a [work in progress](/docs/contribute/roadmap.md)**
+
+As the project evolves,
+Fuchsia is striving to be as open as possible about the state of
+the code and roadmap. The [Fuchsia RFC process](/docs/contribute/governance/rfcs/README.md)
+aims to provide a consistent and transparent path
+for making project-wide, technical decisions.
diff --git a/docs/concepts/principles/pragmatic.md b/docs/concepts/principles/pragmatic.md
new file mode 100644
index 0000000..427a46a
--- /dev/null
+++ b/docs/concepts/principles/pragmatic.md
@@ -0,0 +1,77 @@
+# Pragmatic
+
+Fuchsia is a production-grade operating system designed
+to power consumer devices and products used for business-critical applications.
+As such, Fuchsia is not a playground for experimental operating system concepts.
+Instead, practical use cases arising from partner and product needs drive the
+platform’s roadmap.
+
+By prioritizing security, updatability, and performance,
+our goal is to create an operating system
+that meets the needs and expectations of developers, manufacturers, and
+consumers.
+
+Fuchsia may delay pursuing new or experimental features
+to ensure that Fuchsia is as good as, or better than, the alternatives.
+
+## A kernel that is practical, not minimal
+
+**[Zircon](/docs/concepts/kernel/README.md)
+is a pragmatic, message-passing kernel—not a microkernel**
+
+Although Fuchsia applies many of the concepts popularized by microkernels,
+Fuchsia does not strive for minimality.
+For example, Fuchsia has over 170 syscalls,
+which is vastly more than that of a typical microkernel.
+Instead of minimality,
+the system architecture is guided by practical concerns
+about security, privacy, and performance.
+
+## Fuchsia provides pathways for porting existing software
+
+**[A POSIX-lite API](https://fuchsia.dev/reference/fidl/fuchsia.posix)
+ eases porting costs;
+ [the component framework](/docs/concepts/components/v2/runners.md)
+ encourages developers to bring their existing application runtimes**
+
+There is a large ecosystem of software for existing platforms;
+developers shouldn't need to rewrite everything from scratch.
+Fuchsia supports a subset of POSIX to ease porting costs.
+Furthermore, Fuchsia’s software model encourages developers
+to bring their own runtimes in the form of component runners.
+This allows developers to use their desired application frameworks
+and re-use much of their application code.
+
+## A flexible scheduler optimizes the system
+
+**[Fair scheduling](/docs/concepts/kernel/fair_scheduler.md)
+gives the system more flexibility to schedule work**
+
+Increasing the choices available to the system scheduler gives the scheduler
+the flexibility to optimize for power, throughput, or latency,
+as appropriate for the situation.
+At any given time, there are more threads in the system
+that are ready to do useful work than there would be
+if threads commonly blocked one another.
+
+## On the roadmap
+
+This section covers features on
+[Fuchsia's roadmap](/docs/contribute/roadmap.md).
+
+### Performance is a priority
+
+**[Asynchronous communication](/docs/concepts/fidl/overview.md#messaging_models)
+reduces latency**
+
+Fuchsia makes heavy use of asynchronous communication,
+which reduces latency by letting the sender proceed
+without waiting for the receiver.
+This is important for delivering software that can come and go
+on a device as needed,
+to account for network latency.
+
+Fuchsia does not yet achieve its performance goals,
+but this is an area under active development.
+For example, performance related storage enhancements are on the
+[project roadmap](/docs/contribute/roadmap.md).
diff --git a/docs/concepts/principles/secure.md b/docs/concepts/principles/secure.md
new file mode 100644
index 0000000..528408a
--- /dev/null
+++ b/docs/concepts/principles/secure.md
@@ -0,0 +1,56 @@
+# Secure
+
+Security and privacy are woven deeply into the architecture of Fuchsia.
+The basic building blocks of Fuchsia, the kernel primitives,
+are exposed to applications as object-capabilities.
+This means that applications running on Fuchsia have no ambient authority:
+applications can interact only with the objects
+to which they have been granted access explicitly.
+
+Software is delivered in hermetic packages and everything is sandboxed.
+All software that runs on the system, including applications and system
+components, receives the least privilege it needs to perform its job and
+gains access only to the information it needs to know.
+Because capability routing and software isolation are enforced by the
+operating system, developers don’t have to build an additional
+system for security.
+
+## Fuchsia builds on a kernel designed to securely isolate software
+
+**[Zircon](/docs/concepts/kernel/README.md)
+is a capability-based, object-oriented kernel**
+
+Zircon fully isolates processes by default;
+capabilities and resources must be granted explicitly.
+Fuchsia passes capabilities and resources by handle rather than by name,
+which leads to a system that grants software access only to what it needs.
+
+## Components are the fundamental unit of software execution
+
+**[Components](/docs/concepts/components/v2/introduction.md)
+are isolated containers for Fuchsia software**
+
+Nearly all user space software is a component,
+from system services to end-user applications.
+The component framework encourages the composition of loosely coupled software.
+Capabilities used and exposed must be explicitly declared.
+
+## Software is delivered in self-contained packages
+
+**[Packages](/docs/concepts/packages/package.md)
+have everything they need to run every time**
+
+Components are distributed through hermetic, or self-contained, packages
+that include all needed files.
+Fuchsia packages are a collection of components, files, and metadata.
+Isolated namespaces mean a component only has visibility to its own package.
+
+## Fuchsia has no global file system or ambient authority
+
+**[Namespaces](/docs/concepts/framework/namespaces.md)
+prevent programs from escaping their containers**
+
+Fuchsia aims to have no ambient authority,
+which means every operation is scoped to an object capability.
+Similarly, Fuchsia has no global file system.
+Instead, each program is given its own local namespace in which to operate.
diff --git a/docs/concepts/principles/updatable.md b/docs/concepts/principles/updatable.md
new file mode 100644
index 0000000..099fcea
--- /dev/null
+++ b/docs/concepts/principles/updatable.md
@@ -0,0 +1,69 @@
+# Updatable
+
+Fuchsia distributes software in packages,
+which are hermetically sealed bundles of components, related files, and dependencies.
+Fuchsia packages are designed to be updated independently or even delivered ephemerally,
+which means they can come and go from the device as needed and the software is always up to date,
+like a web page.
+
+Fuchsia aims to provide drivers with a binary-stable interface.
+In the future,
+drivers compiled for one version of Fuchsia will continue to work
+in future versions of Fuchsia without needing to be modified or even recompiled.
+This approach means that Fuchsia devices will be able
+to update to newer versions of Fuchsia seamlessly while keeping their existing drivers.
+
+## Almost all software on Fuchsia is a component
+
+**[The component framework](/docs/concepts/components/v2/introduction.md)
+makes it easier to update the system as new software is created**
+
+The kernel has a minimal set of responsibilities;
+nearly everything else lives in a user space component.
+Components are identified by URLs and
+can be resolved, downloaded, and executed on demand like the web.
+They are governed by the same mechanisms and they all work together.
+Hermetic packaging of components leads to more portable software.
+
+## Software is interchangeable and reusable
+
+**[Fuchsia Interface Definition Language (FIDL)](/docs/concepts/fidl/overview.md)
+enables loose coupling between components**
+
+Components exchange capabilities as defined by FIDL protocols.
+Software is composed at runtime through protocols
+rather than through static composition of libraries.
+Fuchsia has no system libraries.
+Even the C standard library [(libc)](/docs/concepts/system/libc.md)
+is expressed as a dependency,
+delivered only when software needs it.
+Components can be swapped with another implementation
+as long as they express the same FIDL protocol.
+
+## Push updates and security patches to all products on demand
+
+**[Fuchsia packages](/docs/concepts/packages/package.md)
+are the units of software distribution**
+
+All software is delivered in packages that
+can be updated independently and delivered on demand, like the web.
+This enables a vulnerability patch to be pushed to all Fuchsia products at once
+without the need for individual product coordination.
+
+## On the roadmap
+
+This section covers features on
+[Fuchsia's roadmap](/docs/contribute/roadmap.md).
+
+### Update the system without modifying the driver
+
+**[Drivers](/docs/concepts/drivers/getting_started.md)
+and system services are designed as user space components that
+can be updated independently of the core OS**
+
+We are designing the system so that Fuchsia products can receive system updates
+without having to modify or recompile drivers.
+Drivers, system services, and end-user applications would be updated
+independently through the same mechanism, reducing the maintenance burden.
+Device owners could receive Fuchsia updates without having to update
+their drivers.
diff --git a/docs/concepts/system/abi/system.md b/docs/concepts/system/abi/system.md
index 92237a8..4f2f0f6 100644
--- a/docs/concepts/system/abi/system.md
+++ b/docs/concepts/system/abi/system.md
@@ -113,8 +113,8 @@
 #### meta
 
 By convention, the `meta` directory in a package contains metadata files that
-describe the package. The structure of this metadata, include the data formats
-used by these files, are part of the system ABI.
+describe the package. The structure of this metadata, including the data formats
+used by these files, is part of the system ABI.
 
 #### lib
 
diff --git a/docs/concepts/testing/OWNERS b/docs/concepts/testing/OWNERS
index bcbb464..66e7889 100644
--- a/docs/concepts/testing/OWNERS
+++ b/docs/concepts/testing/OWNERS
@@ -1 +1 @@
-per-file test_component.md=anmittal@google.com
\ No newline at end of file
+per-file v1_test_component.md=anmittal@google.com
\ No newline at end of file
diff --git a/docs/concepts/testing/_toc.yaml b/docs/concepts/testing/_toc.yaml
index 5ed92b6..d9863cd 100644
--- a/docs/concepts/testing/_toc.yaml
+++ b/docs/concepts/testing/_toc.yaml
@@ -10,12 +10,14 @@
   path: /docs/concepts/testing/testability_rubric.md
 - title: "Tests as components"
   path: /docs/concepts/testing/tests_as_components.md
+- title: "Test components (Components v1)"
+  path: /docs/concepts/testing/v1_test_component.md
 - title: "Test Runner Framework"
   section:
   - title: "Introduction"
     path: /docs/concepts/testing/test_runner_framework.md
-  - title: "Test component"
-    path: /docs/concepts/testing/test_component.md
+  - title: "Test components (Components v2)"
+    path: /docs/concepts/testing/v2_test_component.md
   - title: "Test environments"
     path: /docs/concepts/testing/environments.md
 - title: "Scripting Layer for Fuchsia (SL4F)"
diff --git a/docs/concepts/testing/images/hello_world_topology.png b/docs/concepts/testing/images/hello_world_topology.png
new file mode 100644
index 0000000..c0aafbd
--- /dev/null
+++ b/docs/concepts/testing/images/hello_world_topology.png
Binary files differ
diff --git a/docs/concepts/testing/test_component.md b/docs/concepts/testing/test_component.md
deleted file mode 100644
index f333a52..0000000
--- a/docs/concepts/testing/test_component.md
+++ /dev/null
@@ -1,291 +0,0 @@
-# Test Component
-
-## Create a test component
-
-### BUILD.gn
-
-```gn
-import("//src/sys/build/components.gni")
-
-executable("my_test") {
-  sources = [ "my_test.cc" ]
-  testonly = true
-  deps = [
-    "//src/lib/fxl/test:gtest_main",
-    "//third_party/googletest:gtest",
-  ]
-}
-
-fuchsia_component("my-test-component") {
-  testonly = true
-  manifest = "meta/my_test.cmx"
-  deps = [ ":my_test" ]
-}
-
-fuchsia_test_package("my-test-package") {
-  test_components = [ ":my-test-component" ]
-}
-
-group("tests") {
-  deps = [ ":my-integration-test" ]
-  testonly = true
-}
-```
-
-`test_package` will expect that there is a corresponding cmx file in the `meta`
-folder. So for above example there should be a `my_test.cmx` file in `meta/`.
-
-See also: [test packages][test-packages]
-
-### meta/my\_test.cmx
-
-```json
-{
-    "program": {
-        "binary": "bin/my_test"
-    },
-    "sandbox": {
-        "services": [...]
-    }
-}
-```
-
-## Running the tests
-
-To run a Fuchsia test out of your build, execute:
-
-<pre class="prettyprint">
-<code class="devsite-terminal">fx test <var>TEST_NAME</var></code>
-</pre>
-
-For more information, see [Run Fuchsia tests][executing-tests].
-
-## Isolated Storage
-
-- By default, the test component is launched in a new hermetic environment.
-- The generated environment name is of form test\_env\_XXXXX, where XXXXX is a
-  randomly generated number.
-- Each test component receives a new isolated storage directory.
-- The directory is deleted after the text exits, regardless of the test's
-  outcome.
-
-### Keep storage for debugging
-
-If you need to keep test storage for debugging after the test ends, use
-[run-test-component][run-test-component] in the Fuchsia shell and pass
-`--realm-label` flag.
-
-The `--realm-label` flag defines the label for an environment that your test
-runs in. When the test ends, the storage won't be deleted automatically -
-it'll be accessible at a path under /data. Assuming you:
-
-- gave your test component (in package `mypackage` with component manifest
-  `myurl.cmx`) access to the "isolated-persistent-storage" feature
-- passed --realm-label=foo to run-test-component
-- wrote to the file `/data/bar` from the test binary
-- can connect to the device via `fx shell`
-
-You should see the written file under the path
-`/data/r/sys/r/<REALM>/fuchsia.com:<PACKAGE>:0#meta:<CMX>/<FILE>`, e.g.
-`/data/r/sys/r/foo/fuchsia.com:mypackage:0#meta:myurl.cmx/bar`
-
-Assuming you can connect to the device via ssh, you can get the data off the
-device with the in-tree utility `fx scp`.
-
-When you're done exploring the contents of the directory, you may want to
-delete it to free up space or prevent it from interfering with the results of
-future tests.
-
-## Ambient Services
-
-All test components are started in a new hermetic environment. By default, this
-environment only contains a few basic services (ambient):
-
-```text
-"fuchsia.process.Launcher"
-"fuchsia.process.Resolver"
-"fuchsia.sys.Environment"
-"fuchsia.sys.Launcher"
-"fuchsia.sys.Loader"
-```
-
-Tests can use these services by mentioning them in their `sandbox > services`.
-
-## Logger Service
-
-Tests and the components launched in a hermetic environment will have access to system's `fuchsia.logger.LogSink` service if it is included in their sandbox. For tests to inject Logger, the tests must use `injected-services` (see below). Then, the injected Logger service takes precedence.
-
-## Restricting log severity
-
-Tests may be configured to fail when the component's test environment produces
-high severity [logs][syslogs]. This is useful for when such logs, for instance
-when such logs are unexpected, as they indicate an error.
-
-A test might expect to log at ERROR severity. For example, the test might be
-covering a failure condition & recovery steps. Other tests might expect not to
-log anything more severe than INFO. The common case and default behavior is for
-errors above WARN level to be considered failures, but there are configuration
-options to override it.
-
-For instance, to allow a test to produce **ERROR** logs:
-
-  * {Using fuchsia\_test\_package}
-
-  ```gn
-  fuchsia_component("my-package") {
-    testonly = true
-    manifest = "meta/my-test.cmx"
-    deps = [ ":my_test" ]
-  }
-
-  fuchsia_test_package("my-package") {
-    test_specs = {
-        log_settings = {
-          max_severity = "ERROR"
-        }
-    }
-    test_components = [ ":my-test" ]
-  }
-  ```
-
-  * {Using test\_package}
-
-  ```gn
-  test_package("my-package") {
-    deps = [
-      ":my_test",
-    ]
-
-    meta = []
-      {
-        path = rebase_path("meta/my-test.cmx")
-        dest = "my-test.cmx"
-      },
-    ]
-
-    tests = [
-      {
-        log_settings = {
-          max_severity = "ERROR"
-        }
-        name = "my_test"
-        environments = basic_envs
-      },
-    ]
-  }
-  ```
-
-To make the test fail on any message more severe than **INFO** set `max_severity`
-to **"INFO"**.
-
-Valid values for `max_severity`: `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`, `FATAL`.
-
-If your test was already configured using [legacy methods][legacy-restrict-logs]
-you will need to remove your test from the config file (eg.
-max_severity_fuchsia.json) and run `fx ota`.
-
-If the test is not removed from the legacy list, the configuration in legacy
-list would be preferred and you will see a warning when running the test.
-
-## Running test cases in parallel
-
-  [Test Runner Framework][trf] makes it easy to run test cases in parallel by
-  standardizing the option across various test runtimes. [Test runners][test-runner]
-  decide the default value for how many tests can run in parallel but developers can
-  override it using `BUILD.gn`.
-
-  * {Using fuchsia\_test\_package}
-
-  ```gn
-  fuchsia_component("my-package") {
-    testonly = true
-    manifest = "meta/my-test.cml"
-    deps = [ ":my_test" ]
-  }
-
-  fuchsia_test_package("my-package") {
-    test_specs = {
-        parallel = 1
-    }
-    test_components = [ ":my-test" ]
-  }
-  ```
-
-  * {Using test\_package}
-
-  ```gn
-  test_package("my-package") {
-    deps = [
-      ":my_test",
-    ]
-
-    meta = []
-      {
-        path = rebase_path("meta/my-test.cml")
-        dest = "my-test.cm"
-      },
-    ]
-
-    tests = [
-      {
-        parallel = 1
-        name = "my_test"
-        environments = basic_envs
-      },
-    ]
-  }
-  ```
-
-NOTE: This feature only works with v2 component tests.
-
-### Running the test
-
-When running the test on development device, prefer `fx test` to run the test.
-The tool will automatically pick the configuration and pass it to
-run-test-component. If for some reason you need to use `run-test-component`,
-you need to pass the flag yourself.
-
-```sh
-fx shell run-test-component --max-log-severity=ERROR <test_url>
-```
-
-## Run external services
-
-If your test needs to use (i.e. its sandbox includes) any services other than the ambient and logger services above, you must perform either, both or none:
-
-- Inject the services by starting other components that provide those services in the hermetic test environment
-- Request non-hermetic system services be included in the test environment, when a service cannot be faked or mocked, see [Other system services](#Other-system-services).
-
-To inject additional services, you can add a `injected-services` clause to the manifest file's facets:
-
-```json
-"facets": {
-  "fuchsia.test": {
-    "injected-services": {
-        "service_name1": "component_url1",
-        "service_name2": "component_url2"
-    }
-  }
-}
-```
-
-`fx test` will start `component_url1` and `component_url2` and the
-test will have access to `service_name1` and `service_name2`. Note that this makes the injected services available in the test environment, but the test component still needs to "use" them by including the service in its `sandbox > services`.
-
-### Other system services
-
-There are some services that cannot be faked or mocked. You can connect to real
-system versions of these services by mentioning these services in
-`system-services`. Services that cannot be faked are listed
-[here](/garnet/bin/run_test_component/test_metadata.cc).
-
-Test can only list allowlisted system services under `"system-services"` as
-demonstrated above.
-
-[executing-tests]: /docs/development/testing/run_fuchsia_tests.md
-[run-test-component]: /docs/development/testing/run_fuchsia_tests.md
-[syslogs]: /docs/development/diagnostics/logs/README.md
-[test-packages]: /docs/development/components/build.md#test-packages
-[legacy-restrict-logs]: https://fuchsia.googlesource.com/fuchsia/+/1529a885fa0b9ea4867aa8b71786a291158082b7/docs/concepts/testing/test_component.md#restricting-log-severity
-[trf]: test_runner_framework.md
-[test-runner]: test_runner_framework.md#test-runner
diff --git a/docs/concepts/testing/test_flake_policy.md b/docs/concepts/testing/test_flake_policy.md
index 3fbd911..ea7662e 100644
--- a/docs/concepts/testing/test_flake_policy.md
+++ b/docs/concepts/testing/test_flake_policy.md
@@ -71,15 +71,4 @@
 issue. Once the issue has been fixed, the bug can be closed, and the test can be
 re-enabled. If any reverted patches need to re-land, they can re-land safely.
 
-## Improvements and Tooling
-
-Ongoing efforts to improve tooling surrounding flakes are actively underway.
-
-These include:
-
--   Automatically assigning issues for resolving flakes, based on information present in OWNERs
-    files.
--   "Deflaking" infrastructure, to re-run tests in high volume before they are
-    committed.
-
-As improvements are made, this document will be updated with the latest policy.
+When fixing a flake, verify the fix by [testing for flakiness in CQ](/docs/development/testing/testing_for_flakiness_in_cq.md).
diff --git a/docs/concepts/testing/test_runner_framework.md b/docs/concepts/testing/test_runner_framework.md
index 8e7c15f..671d173 100644
--- a/docs/concepts/testing/test_runner_framework.md
+++ b/docs/concepts/testing/test_runner_framework.md
@@ -1,4 +1,4 @@
-# Introduction to the Fuchsia Testing Framework
+# Introduction to the Fuchsia Test Runner Framework
 
 This document introduces the Fuchsia Test Runner Framework along with
 fundamental concepts and terminology around testing in Fuchsia.
@@ -75,6 +75,8 @@
 natively against the language-specific test library and do not need to manually
 export results under the test suite protocol.
 
+For a working example, see the [driver pattern for v2 component tests][v2-driver-pattern].
+
 ## Hermeticity
 
 A test is *hermetic* if it [uses][manifests-use] or [offers][manifests-offer] no
@@ -113,3 +115,4 @@
 [realms]: /docs/concepts/components/v2/realms.md
 [realms-definitions]: /docs/concepts/components/v2/realms.md#definitions
 [test-suite-protocol]: /docs/concepts/components/v2/realms.md
+[v2-driver-pattern]: v2_test_component.md#driver_pattern_for_v2_component_tests
diff --git a/docs/concepts/testing/testability_rubric.md b/docs/concepts/testing/testability_rubric.md
index ef16173..a7af99d 100644
--- a/docs/concepts/testing/testability_rubric.md
+++ b/docs/concepts/testing/testability_rubric.md
@@ -20,16 +20,9 @@
 
 ### Your goals as a Testability reviewer
 
-*   **Determine if the change is tested.** Apply Testability-Review+1 if you
+*   **Determine if the change is tested.** Apply Code-Review+2 if you
     agree that it’s tested, and reply with a note for what’s missing if it’s not.
-*   Focus on whether the change is tested, not necessarily on what the change
-    actually does. For instance you may apply Testability+1 if the change is
-    well tested and at the same time Code-Review-1 if you would not like to see
-    the change merged for other reasons.
 *   Apply the standard (this doc) consistently.
-*   For your own changes, it is okay to self Testability-Review+1 provided that
-    the change clearly follows this rubric. If in doubt, seek approval from
-    another testability reviewer.
 *   If the change needs to be amended to meet the standards, provide actionable
     feedback.
 *   Promote Fuchsia testing & testability.
@@ -92,7 +85,7 @@
 ## What does not require testing
 
 Missing testing coverage for the below should not prevent a change from
-receiving Testability+1.
+receiving Code-Review+2.
 
 *   **Logging.** In most cases, it’s probably not worth testing the log output
     of components. The log output is usually treated as opaque data by the rest
@@ -132,184 +125,15 @@
     are not easily observable, such as unexposed implementation
     details, heuristics, or "cosmetic" changes (e.g. background color of a UI).
     Tests of the style `assert_eq!(CONFIG_PARAM, 5);` are not considered useful
-    and are not required by testability. However, if the CL results in an easily
-    observable behavioral change, the CL should include a test for the new
-    behavior.
+    and are not required by testability. However, if the contribution results
+    in an easily observable behavioral change, that contribution should
+    include a test for the new behavior.
 
 ## What does require testing
 
-### Recommended: Test for flakiness (if supported)
+### If fixing a flake, test for flakiness in CQ
 
-This is currently recommended. Once <http://fxbug.dev/50301> is done, this will
-be automatically included for tests that are determined to be affected.
-
-Note: This feature is not currently supported for bringup builders.
-
-{% dynamic if user.is_googler %}
-
-Note: Multiplying internal tests on public changes is not allowed, to avoid
-leaking confidential information. If you want to multiply an internal test,
-use `fx make-integration-patch` to create an internal CL that patches your CL
-into the integration repository. Then add the necessary MULTIPLY line to the
-integration CL instead of the original public CL, and CQ+1 the integration
-CL.
-
-{% dynamic endif %}
-
-As a testability reviewer, if a change adds or modifies tests, you
-should make sure the author correctly tests for flakiness using the MULTIPLY
-feature as described below.
-
-As a change author, when you add or modify tests, you should tell the
-infrastructure to run those tests multiple times with a MULTIPLY field in the
-commit message. You would add something like this to your commit message:
-
-```txt
-MULTIPLY: test_name (os): run_count
-```
-
-For example:
-
-```txt
-MULTIPLY: foo_tests (fuchsia): 30
-```
-
-Note: "os" and "run_count" are both optional; see [below](#multiply-examples)
-for more examples.
-
-Then do a CQ dry run (or choose a tryjob that runs your tests).
-These tests show as separate shards for each test, which run that test
-repeatedly until it fails, up to the specified run count. The timeout for
-running these tests is 40 minutes on most builders. If a test takes too long,
-the shard may time out.
-
-The test name can be any of the following:
-
-* The test package URL (for fuchsia tests) or path (for host tests). This is
-  the name that Flake Fetcher uses to refer to tests, and is seen in the
-  "name" field of each entry in `out/default/tests.json`. That file is
-  created after you run `fx set` inside of your Fuchsia directory.
-* A regular expression (using Go's [regular expression
-  syntax](https://github.com/google/re2/wiki/Syntax)) that matches the test
-  name as described above. However, note that if a single multiplier matches
-  more than 5 different tests, it will be rejected (to prevent accidental
-  DoSing). If this happens to you, simply edit your commit message locally or
-  in the Gerrit UI to make your regular expression more specific.
-
-The "os" field, if specified, should be either "fuchsia", "linux", or "mac".
-If left unset, the multiplier will match any test, regardless of the test's
-operating system, as long as the name matches.
-
-If "run_count" is left unspecified, the infrastructure will use historical
-test duration data to calculate a number of runs that will produce a single
-multiplied test shard whose duration is similar to the expected duration of
-the other shards (although the calculated run count will be limited to a
-maximum of 2000). Longer tests will be run fewer times, shorter tests more
-times.
-
-Note: When specifying "run_count", it's important to have a space after the
-colon and before the run_count so as to distinguish it from colons in the test
-name. Otherwise the colon and run_count will be treated as part of the test
-name.
-
-Note: If your CL increases a test's duration, then the historical duration
-data may no longer be accurate and the number of runs calculated by the
-infrastructure may cause the shard to time out. In this case, you'll have to
-edit the commit message and specify a lower number of runs.
-
-#### Determine success
-
-If it worked, any builders running the tests specified by the MULTIPLY feature
-will add comments to the CL that say:
-
-```txt
-A builder created multiplier shards. Click the following link for more details:
-```
-
-This comment includes a link to the build that will run the multiplied tests. If
-the build is completed, you should see a step like `multiplied:<shard
-name>-<test name>` under one of the `passes`, `flakes`, or `failures` steps. If
-the build is not yet completed, you can click on the link under the `build` step
-named `<builder name>-subbuild`, which will take you to the subbuild build page
-where you should see a similar `multiplied` step. Since the comment doesn't
-specify which tests were multiplied, you can look at the build pages to confirm
-(in case you multiplied more than one test).
-
-For example:
-
-![multiplied shard screenshot](multiplied-shard-screenshot.png)
-
-If no such comment appears, then there probably is an error with the syntax or
-the test is unable to run in any of the regular CQ builders. In this case, you
-will have to either add it to the build graph so that it is run by one of the
-builders or manually choose the tryjob that runs the test if it's run in an
-optional builder.
-
-#### Syntax examples {#multiply-examples}
-
-* Title-case "Multiply" can be used instead of all-caps "MULTIPLY":
-
-  ```txt
-  Multiply: foo_tests (fuchsia): 30
-  ```
-
-* If you leave out the OS, the multiplier will be applied to any test that
-  matches the multiplier name, regardless of OS:
-
-  ```txt
-  Multiply: foo_tests: 30
-  ```
-
-* If you leave out the number of runs, the infrastructure will calculate a
-  number of runs that will fill up exactly one shard:
-
-  ```txt
-  Multiply: foo_tests (linux)
-  ```
-
-* You can also leave out both the OS and the number of runs:
-
-  ```txt
-  Multiply: foo_tests
-  ```
-
-* To multiply more than one test, add extra "Multiply" lines:
-
-  ```txt
-  Multiply: foo_tests
-  Multiply: bar_tests
-  ```
-
-* Comma-separated multipliers in a single line are also supported:
-
-  ```txt
-  Multiply: foo_tests: 5, bar_tests (fuchsia): 6
-  ```
-
-* You can reference fuchsia tests by package URL and host tests by path:
-
-  ```txt
-  Multiply: fuchsia-pkg://fuchsia.com/foo_tests#meta/foo_tests.cmx
-  Multiply: host_x64/bar_tests
-  ```
-
-* Regex and substring matching is also supported:
-
-  ```txt
-  Multiply: fuchsia.com/foo_tests
-  ```
-
-* This JSON syntax is also valid:
-
-  ```json
-  Multiply: `[
-    {
-      "name": "foo_bin_test",
-      "os": "fuchsia",
-      "total_runs": 30
-    }
-  ]`
-  ```
+If fixing a flake, verify the fix by [testing for flakiness in CQ](/docs/development/testing/testing_for_flakiness_in_cq.md).
 
 ### Tests should not sleep
 
diff --git a/docs/concepts/testing/tests_as_components.md b/docs/concepts/testing/tests_as_components.md
index 82bdd80..f26d566 100644
--- a/docs/concepts/testing/tests_as_components.md
+++ b/docs/concepts/testing/tests_as_components.md
@@ -249,4 +249,4 @@
 [fuchsia-package-gni]: /src/sys/build/fuchsia_package.gni
 [fuchsia-test-package-gni]: /src/sys/build/fuchsia_test_package.gni
 [fuchsia-unittest-package-gni]: /src/sys/build/fuchsia_unittest_package.gni
-[test-component]: /docs/concepts/testing/test_component.md
+[test-component]: /docs/concepts/testing/v1_test_component.md
diff --git a/docs/concepts/testing/v1_test_component.md b/docs/concepts/testing/v1_test_component.md
new file mode 100644
index 0000000..5e98af8
--- /dev/null
+++ b/docs/concepts/testing/v1_test_component.md
@@ -0,0 +1,242 @@
+# Test Components (Components v1)
+
+<<../components/_v1_banner.md>>
+
+Note: If you are looking for a guide to write a v2 test component, refer to
+the [Fuchsia Test Runner Framework][trf].
+
+## Create a test component
+
+### BUILD.gn
+
+```gn
+import("//src/sys/build/components.gni")
+
+executable("my_test") {
+  sources = [ "my_test.cc" ]
+  testonly = true
+  deps = [
+    "//src/lib/fxl/test:gtest_main",
+    "//third_party/googletest:gtest",
+  ]
+}
+
+fuchsia_component("my-test-component") {
+  testonly = true
+  manifest = "meta/my_test.cmx"
+  deps = [ ":my_test" ]
+}
+
+fuchsia_test_package("my-test-package") {
+  test_components = [ ":my-test-component" ]
+}
+
+group("tests") {
+  deps = [ ":my-integration-test" ]
+  testonly = true
+}
+```
+
+See also: [test packages][test-packages]
+
+### meta/my\_test.cmx
+
+```json
+{
+    "program": {
+        "binary": "bin/my_test"
+    },
+    "sandbox": {
+        "services": [...]
+    }
+}
+```
+
+## Running the tests
+
+To run a Fuchsia test out of your build, execute:
+
+<pre class="prettyprint">
+<code class="devsite-terminal">fx test <var>TEST_NAME</var></code>
+</pre>
+
+For more information, see [Run Fuchsia tests][executing-tests].
+
+## Isolated Storage
+
+- By default, the test component is launched in a new hermetic environment.
+- The generated environment name is of the form test\_env\_XXXXX, where XXXXX is a
+  randomly generated number.
+- Each test component receives a new isolated storage directory.
+- The directory is deleted after the test exits, regardless of the test's
+  outcome.
+
+### Keep storage for debugging
+
+If you need to keep test storage for debugging after the test ends, use
+[run-test-component][run-test-component] in the Fuchsia shell and pass
+the `--realm-label` flag.
+
+The `--realm-label` flag defines the label for an environment that your test
+runs in. When the test ends, the storage won't be deleted automatically -
+it'll be accessible at a path under /data. Assuming you:
+
+- gave your test component (in package `mypackage` with component manifest
+  `myurl.cmx`) access to the "isolated-persistent-storage" feature
+- passed `--realm-label=foo` to `run-test-component`
+- wrote to the file `/data/bar` from the test binary
+- can connect to the device via `fx shell`
+
+You should see the written file under the path
+`/data/r/sys/r/<REALM>/fuchsia.com:<PACKAGE>:0#meta:<CMX>/<FILE>`, e.g.
+`/data/r/sys/r/foo/fuchsia.com:mypackage:0#meta:myurl.cmx/bar`
+
+Assuming you can connect to the device via ssh, you can get the data off the
+device with the in-tree utility `fx scp`.
+
+When you're done exploring the contents of the directory, you may want to
+delete it to free up space or prevent it from interfering with the results of
+future tests.
+
+## Ambient Services
+
+All test components are started in a new hermetic environment. By default, this
+environment only contains a few basic services (ambient):
+
+```text
+"fuchsia.process.Launcher"
+"fuchsia.process.Resolver"
+"fuchsia.sys.Environment"
+"fuchsia.sys.Launcher"
+"fuchsia.sys.Loader"
+```
+
+Tests can use these services by mentioning them in their `sandbox > services`.
+
+## Logger Service
+
+Tests and the components launched in a hermetic environment will have access to the system's `fuchsia.logger.LogSink` service if it is included in their sandbox. For tests to inject Logger, the tests must use `injected-services` (see below); the injected Logger service then takes precedence.
+
+## Restricting log severity
+
+Tests may be configured to fail when the component's test environment produces
+high severity [logs][syslogs]. This is useful, for instance, 
+when such logs are unexpected, as they indicate an error.
+
+A test might expect to log at ERROR severity. For example, the test might be
+covering a failure condition and recovery steps. Other tests might expect not to
+log anything more severe than INFO. The common case and default behavior is for
+logs more severe than WARN to be considered failures, but there are
+configuration options to override this.
+
+For instance, to allow a test to produce **ERROR** logs:
+
+  * {Using fuchsia\_test\_package}
+
+  ```gn
+  fuchsia_component("my-package") {
+    testonly = true
+    manifest = "meta/my-test.cmx"
+    deps = [ ":my_test" ]
+  }
+
+  fuchsia_test_package("my-package") {
+    test_specs = {
+        log_settings = {
+          max_severity = "ERROR"
+        }
+    }
+    test_components = [ ":my-test" ]
+  }
+  ```
+
+  * {Using test\_package}
+
+  ```gn
+  test_package("my-package") {
+    deps = [
+      ":my_test",
+    ]
+
+    meta = [
+      {
+        path = rebase_path("meta/my-test.cmx")
+        dest = "my-test.cmx"
+      },
+    ]
+
+    tests = [
+      {
+        log_settings = {
+          max_severity = "ERROR"
+        }
+        name = "my_test"
+        environments = basic_envs
+      },
+    ]
+  }
+  ```
+
+To make the test fail on any message more severe than **INFO**, set `max_severity`
+to **"INFO"**.
+
+Valid values for `max_severity`: `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`, `FATAL`.
+
+If your test was already configured using [legacy methods][legacy-restrict-logs],
+you will need to remove your test from the config file (e.g.
+max_severity_fuchsia.json) and run `fx ota`.
+
+If the test is not removed from the legacy list, the configuration in the
+legacy list takes precedence and you will see a warning when running the test.
+
+### Running the test
+
+When running the test on a development device, prefer `fx test` to run the test.
+The tool automatically picks the configuration and passes it to
+`run-test-component`. If for some reason you need to use `run-test-component`
+directly, you need to pass the flag yourself.
+
+```sh
+fx shell run-test-component --max-log-severity=ERROR <test_url>
+```
+
+## Run external services
+
+If your test needs to use (i.e. its sandbox includes) any services other than the ambient and logger services above, you must do one or both of the following:
+
+- Inject the services by starting other components that provide those services in the hermetic test environment
+- Request non-hermetic system services be included in the test environment, when a service cannot be faked or mocked, see [Other system services](#Other-system-services).
+
+To inject additional services, you can add an `injected-services` clause to the manifest file's facets:
+
+```json
+"facets": {
+  "fuchsia.test": {
+    "injected-services": {
+        "service_name1": "component_url1",
+        "service_name2": "component_url2"
+    }
+  }
+}
+```
+
+`fx test` will start `component_url1` and `component_url2` and the
+test will have access to `service_name1` and `service_name2`. Note that this makes the injected services available in the test environment, but the test component still needs to "use" them by including the service in its `sandbox > services`.
+
+### Other system services
+
+There are some services that cannot be faked or mocked. You can connect to the
+real system versions of these services by listing them under
+`system-services`. Services that cannot be faked are listed
+[here](/garnet/bin/run_test_component/test_metadata.cc).
+
+Tests can only list allowlisted system services under `"system-services"`.
+
+[executing-tests]: /docs/development/testing/run_fuchsia_tests.md
+[run-test-component]: /docs/development/testing/run_fuchsia_tests.md
+[syslogs]: /docs/development/diagnostics/logs/README.md
+[test-packages]: /docs/development/components/build.md#test-packages
+[legacy-restrict-logs]: https://fuchsia.googlesource.com/fuchsia/+/1529a885fa0b9ea4867aa8b71786a291158082b7/docs/concepts/testing/v1_test_component.md#restricting-log-severity
+[trf]: test_runner_framework.md
+
diff --git a/docs/concepts/testing/v2_test_component.md b/docs/concepts/testing/v2_test_component.md
new file mode 100644
index 0000000..eac475bc
--- /dev/null
+++ b/docs/concepts/testing/v2_test_component.md
@@ -0,0 +1,135 @@
+# Test Components (Components v2)
+
+<<../components/_v2_banner.md>>
+
+## Integration Tests
+
+This section defines various patterns commonly used to author integration tests.
+Note that test authors can use other patterns if it makes sense for their
+project.
+
+### Driver pattern for v2 component tests
+
+This section demonstrates how to use the driver pattern to write your test.
+See this `BUILD.gn` file as an example:
+
+```gn
+{% includecode gerrit_repo="fuchsia/fuchsia" gerrit_path="examples/components/basic/integration_tests/BUILD.gn" region_tag="example_snippet" adjust_indentation="auto" %}
+```
+
+The topology for the example will look like:
+
+<br>![Test driver topology](images/hello_world_topology.png)<br>
+
+In this example the test package `hello-world-integration-test` contains four
+components:
+
+- **hello-world-integration-test-component** - Main entry point
+- **hello-world** - Component to test
+- **hello-world-integration-test-driver** - Test driver
+- **archivist-for-embedding** - Helper component which provides services to
+  other components.
+
+`hello-world-integration-test-component` has two children:
+
+- **hello-world-integration-test-driver**
+- **archivist-for-embedding**
+
+This is a simple component realm which launches
+`hello-world-integration-test-driver` and offers it helper services.
+
+Note that it exposes `fuchsia.test.Suite` from its child, the test driver. The
+topmost component in a test realm must always expose this protocol to integrate
+with the Test Runner Framework.
+
+```json5
+{% includecode gerrit_repo="fuchsia/fuchsia" gerrit_path="examples/components/basic/integration_tests/meta/hello-world-integration-test.cml" region_tag="example_snippet" adjust_indentation="auto" %}
+```
+
+`hello-world-integration-test-driver` contains the test logic and expectations.
+The component launches the `hello-world` component and asserts that it is
+writing the expected strings to the log.
+
+Note that this is a Rust test, and therefore includes
+`rust/default.shard.cml`, which sets up the capability routing required to integrate
+with the Rust test framework.
+
+```json5
+{% includecode gerrit_repo="fuchsia/fuchsia" gerrit_path="examples/components/basic/integration_tests/meta/hello-world-integration-test-driver.cml" region_tag="example_snippet" adjust_indentation="auto" %}
+```
+
+The code for this example can be found under
+[`//examples/components/basic/integration_tests`][driver-pattern-example].
+
+## Running the tests
+
+To run a Fuchsia test, use this command:
+
+<pre class="prettyprint">
+<code class="devsite-terminal">fx test <var>TEST_NAME</var></code>
+</pre>
+
+For more information, see [Run Fuchsia tests][executing-tests].
+
+## Running test cases in parallel
+
+Different test runtimes support different levels of parallelism. Their default
+behaviors might differ. For instance, GoogleTest C++ tests default to running
+serially (one at a time), while Rust tests run multiple tests concurrently.
+
+You can specify the maximum parallelism for tests in the build definition to
+override the default behavior, as shown below.
+
+  * {Using fuchsia_test_package}
+
+  ```gn
+  fuchsia_component("my-package") {
+    testonly = true
+    manifest = "meta/my-test.cml"
+    deps = [ ":my_test" ]
+  }
+
+  fuchsia_test_package("my-package") {
+    test_specs = {
+        parallel = 1
+    }
+    test_components = [ ":my-test" ]
+  }
+  ```
+
+  * {Using test_package}
+
+  ```gn
+  test_package("my-package") {
+    deps = [
+      ":my_test",
+    ]
+
+    meta = [
+      {
+        path = rebase_path("meta/my-test.cml")
+        dest = "my-test.cm"
+      },
+    ]
+
+    tests = [
+      {
+        parallel = 1
+        name = "my_test"
+        environments = basic_envs
+      },
+    ]
+  }
+  ```
+
+When running the test on a development device, prefer `fx test` to run the test.
+The tool automatically picks the configuration and passes it to
+`run-test-suite`. If for some reason you need to use `run-test-suite` directly,
+you need to pass the flag yourself.
+
+```sh
+fx shell run-test-suite --parallel=5 <test_url>
+```
+
+[driver-pattern-example]: /examples/components/basic/integration_tests/
+[executing-tests]: /docs/development/testing/run_fuchsia_tests.md
diff --git a/docs/concepts/time/_toc.yaml b/docs/concepts/time/_toc.yaml
new file mode 100644
index 0000000..1d277e1
--- /dev/null
+++ b/docs/concepts/time/_toc.yaml
@@ -0,0 +1,19 @@
+# Copyright 2020 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Please, read https://fuchsia.dev/fuchsia-src/community/contribute/docs/documentation_navigation_toc
+# before making changes to this file, and add a member of the fuchsia.dev
+# team as reviewer.
+toc:
+- title: "Overview"
+  path: /docs/concepts/time/overview.md
+- title: "Monotonic"
+  path: /docs/concepts/time/monotonic.md
+- title: "UTC"
+  section:
+  - include: /docs/concepts/time/utc/_toc.yaml
+- title: "Local"
+  path: /docs/concepts/time/local.md
+- title: "Language Support"
+  path: /docs/concepts/time/language_support.md
diff --git a/docs/concepts/time/language_support.md b/docs/concepts/time/language_support.md
new file mode 100644
index 0000000..8822f11
--- /dev/null
+++ b/docs/concepts/time/language_support.md
@@ -0,0 +1,4 @@
+# Time - Language Support
+
+TODO([65785](https://fxbug.dev/65785)): Document time functions in supported
+languages.
\ No newline at end of file
diff --git a/docs/concepts/time/local.md b/docs/concepts/time/local.md
new file mode 100644
index 0000000..87217e9
--- /dev/null
+++ b/docs/concepts/time/local.md
@@ -0,0 +1,4 @@
+# Local Time
+
+TODO([65920](https://fxbug.dev/65920)): Populate this page to document the
+operation of local time on Fuchsia, linking to the internationalization docs.
\ No newline at end of file
diff --git a/docs/concepts/time/monotonic.md b/docs/concepts/time/monotonic.md
new file mode 100644
index 0000000..c541d2d
--- /dev/null
+++ b/docs/concepts/time/monotonic.md
@@ -0,0 +1,18 @@
+# Monotonic Time
+
+Monotonic time is a measurement of the time since the system was powered on and
+is maintained by the kernel. Note that the monotonic time may not always reset
+to zero on reboot and the behavior during sleep/suspend has not yet been
+defined.
+
+Monotonic time is the most reliable time standard on Fuchsia and reading
+monotonic time is usually cheaper than reading UTC or local time. Monotonic time
+is always available and it always increases continuously and monotonically.
+Monotonic time is locked to the frequency of the underlying hardware oscillator
+and does not attempt to correct for any errors in that oscillator.
+
+Since monotonic time counts from power on, it is only meaningful in the context
+of a single power cycle on a single Fuchsia device.
+
+Components may read monotonic time using
+[`zx_clock_get_monotonic`](/docs/reference/syscalls/clock_get_monotonic.md).
\ No newline at end of file
diff --git a/docs/concepts/time/overview.md b/docs/concepts/time/overview.md
new file mode 100644
index 0000000..bf886b7
--- /dev/null
+++ b/docs/concepts/time/overview.md
@@ -0,0 +1,57 @@
+# Time Overview
+
+Components on Fuchsia can read current time using three different
+[time standards][1]:
+
+* **[Monotonic](monotonic.md)**: Monotonic time is a measurement of the time
+  since the system was powered on. Monotonic time always moves forwards and is
+  always available to all applications but is only meaningful with the context
+  of a single power cycle on a single Fuchsia device.
+* **[UTC](utc/overview.md)**: UTC time is the system’s best estimate of
+  [Coordinated Universal Time][2]. UTC is usually acquired over a network
+  so there are conditions under which the system may not know UTC. Developers
+  using UTC should pay particular attention to [UTC behavior](utc/behavior.md)
+  to understand the unique properties of UTC on Fuchsia.
+* **[Local](local.md)**: Local time is the system’s best estimate of the
+  [standard time][3] at the device’s location, aka "wall clock time". Local time
+  is derived from UTC and the time zone, so it inherits much of the
+  [UTC behavior](utc/behavior.md). There are conditions under which local time
+  is not available. The local time will jump substantially if the user changes
+  time zone.
+
+These time standards are frequently available through the
+[time functions in supported languages](language_support.md) in addition to
+[time syscalls](/docs/reference/syscalls/clock_create.md).
+
+As a developer, you must select the most appropriate time standard to address
+each problem. Monotonic time has the fewest failure modes and the most stable
+behavior, so it should generally be the default choice unless there is a reason
+that monotonic time will not work. UTC has fewer failure modes and more stable
+behavior than local time, so it should be preferred over local time unless there
+is some reason that local time is necessary.
+
+For example:
+
+1. Use monotonic time to implement a ten second delay between retries.
+   Monotonic time will be available in all cases, so it provides the simplest and
+   most reliable solution.
+1. Use UTC time to expire and delete a file stored on disk after seven days.
+   Here monotonic time would not allow the expiry time to be
+   preserved across power cycles and local time would have coupled the
+   correctness of the expiry to the timezone setting.
+1. Use UTC time to timestamp an on-device event that will be read by some
+   server. In this case monotonic time would not work since the server probably
+   does not know when the Fuchsia device last powered on. Using local time would
+   require that the device and server agree on the timezone which would be error
+   prone.
+1. Use local time to display the current time to the user as an analog clock
+   face. Local time is the most natural time standard for users, so no other
+   time standard is practical here.
+
+Testing code that depends on time can be difficult on any platform. Tools and
+best practices for testing time dependencies on Fuchsia are being developed and
+will be linked here when available.
+
+[1]: https://en.wikipedia.org/wiki/Time_standard
+[2]: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
+[3]: https://en.wikipedia.org/wiki/Standard_time
diff --git a/docs/concepts/time/utc/_toc.yaml b/docs/concepts/time/utc/_toc.yaml
new file mode 100644
index 0000000..f7bfcc0
--- /dev/null
+++ b/docs/concepts/time/utc/_toc.yaml
@@ -0,0 +1,16 @@
+# Copyright 2020 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Please, read https://fuchsia.dev/fuchsia-src/community/contribute/docs/documentation_navigation_toc
+# before making changes to this file, and add a member of the fuchsia.dev
+# team as reviewer.
+toc:
+- title: "Overview"
+  path: /docs/concepts/time/utc/overview.md
+- title: "Architecture"
+  path: /docs/concepts/time/utc/architecture.md
+- title: "Algorithms"
+  path: /docs/concepts/time/utc/algorithms.md
+- title: "Behavior"
+  path: /docs/concepts/time/utc/behavior.md
diff --git a/docs/concepts/time/utc/algorithms.md b/docs/concepts/time/utc/algorithms.md
new file mode 100644
index 0000000..8264fbd
--- /dev/null
+++ b/docs/concepts/time/utc/algorithms.md
@@ -0,0 +1,461 @@
+# UTC Synchronization Algorithms
+
+## Introduction
+
+This page defines and explains the algorithms that
+[Timekeeper](/src/sys/time/timekeeper) uses to fulfill its central role in the
+[UTC Architecture](architecture.md).
+
+This information may be of interest if you are working on the time system or
+need a detailed understanding of the internals of UTC on Fuchsia. If you simply
+wish to develop components on Fuchsia that use UTC, consider reading the much
+shorter and simpler [UTC Overview](overview.md) and [UTC Behavior](behavior.md)
+pages instead.
+
+Time source components such as
+[HTTPSDate time source](/src/sys/time/httpsdate_time_source) also use a range of
+protocol-specific algorithms to compute their time samples; refer to the
+README.md in each time source component for further details.
+
+## Overview
+
+We split the operation of Timekeeper into answering six separate questions:
+
+1. **Should a time sample be accepted?** Each time a sample is received from a
+   time source Timekeeper decides whether it should be accepted (in which case
+   it may lead to a change in the system’s understanding of UTC) or rejected (in
+   which case it is discarded).
+2. **Which time source should be used?** Timekeeper
+   maintains a primary UTC estimate that may be influenced by time samples from
+   the Primary, Fallback, or Gating time sources. When more than one of these
+   sources is installed Timekeeper decides which source should be used at each
+   point in time.
+3. **How should a time sample change the estimated UTC?** Timekeeper
+   maintains an estimate of the most probable UTC at all times, along with the
+   uncertainty of this estimate. Timekeeper updates both this estimate and the
+   uncertainty each time a sample is accepted from a time source.
+4. **What strategy should be used to converge the reported UTC?** Each time the
+   estimated UTC changes as the result of a time sample, Timekeeper chooses how
+   to converge the UTC reported to clients with this new estimate. One option is
+   to immediately step the reported time. Another option is to slew the reported
+   time at some rate for some duration.
+5. **How should a sequence of time samples change the estimated frequency?** A
+   sequence of time samples may be used to estimate errors in a device’s
+   oscillator. Correcting for these frequency errors increases the accuracy of
+   the system.
+6. **What clock updates should be made?** Once a convergence strategy has been
+   selected it must be implemented by making one or more updates to the UTC
+   clock object. Estimated frequency changes should also update the UTC clock
+   object.
+
+In some other time synchronization systems (for example NTP) the same algorithm
+is used to answer more than one of these questions. This can make it difficult
+to clearly reason about the behavior of a particular feature, to avoid
+unintentional interactions between features, or to adjust components of the
+algorithm in response to new requirements. On Fuchsia we intentionally use
+separate algorithms to answer each question. This leads to a system that is
+simpler to develop, analyze, and debug and provides greater flexibility to
+change or reconfigure individual algorithms when supporting a wide range of
+different products.
+
+The interactions between these algorithms are summarized in Figure 1 below,
+where black elements are present in all configurations and grey elements are
+only present when an optional time source role is present.
+
+![This figure present a block diagram of algorithm interactions.](images/algorithm_block_diagram.png "Figure 1 - Algorithm Block Diagram")
+
+
+## Notation
+
+The following sections sometimes use equations to define portions of the
+algorithms. These equations are formatted in `code font` for ease of
+identification and use the following notation:
+
+* `|x|` The absolute value of x.
+* `sign(x)` The sign of x. Returns -1 if x is negative and +1 if x is zero or
+  positive.
+* `sqrt(x)` The square root of x.
+* `sum(x)` The sum of property x over all points within some dataset.
+* `clamp(x,y,z)` The value x limited to be no less than y and no greater than z,
+  i.e. `clamp(x, y, z) = min(z, max(y, x))`
+
+Text in UPPER_CASE_SNAKE_CASE refers to configurable parameters that control the
+behavior of the algorithms. These parameters and their default values are
+discussed in [configurable parameters](#configurable_parameters).
+
+
+## Details
+
+### Should a time sample be accepted? {#sample_validation}
+
+Time samples are subjected to simple validity tests before being accepted:
+
+1. Any time sample received less than MIN_SAMPLE_INTERVAL after the previous
+   accepted time sample from the same source is rejected. This limits the
+   maximum resource utilization in Timekeeper as a result of time source bugs.
+2. Any time sample with a UTC earlier than backstop time is rejected. Accepting
+   a sample before backstop time could cause Timekeeper to place its estimate
+   of UTC earlier than backstop time.
+3. Any time sample with a monotonic time far from the current monotonic time is
+   an indication of an error. Time samples where monotonic time is in the future
+   or greater than MIN_SAMPLE_INTERVAL in the past will be rejected.
+4. When Timekeeper is configured with a gating source, any time sample from a
+   non-gating source where `|sample_utc - (gating_utc + 1/estimated_frequency *
+   (sample_monotonic - gating_monotonic))| > GATING_THRESHOLD` is rejected. In
+   this expression gating_utc and gating_monotonic refer to the UTC and
+   monotonic times in the most recently accepted sample from the gating source.
+   This check ensures that all other sources remain consistent with the gating
+   source.
+
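+The following sketch illustrates checks 1 to 3 in Rust. It is purely
+illustrative rather than the Timekeeper implementation; the sample type,
+nanosecond units, and function signature are assumptions made for the example,
+and the gating check is omitted.
+
+```rust
+// Illustrative sketch only, not the Timekeeper implementation. All times are
+// expressed as nanoseconds; the gating check (4) is omitted.
+const MIN_SAMPLE_INTERVAL_NANOS: i64 = 60_000_000_000; // 1 minute
+
+/// A time sample pairing a UTC reading with the monotonic time it was taken at.
+struct Sample {
+    utc_nanos: i64,
+    monotonic_nanos: i64,
+}
+
+fn is_valid(
+    sample: &Sample,
+    last_accepted_monotonic_nanos: Option<i64>,
+    backstop_utc_nanos: i64,
+    current_monotonic_nanos: i64,
+) -> bool {
+    // 1. Reject samples arriving sooner than MIN_SAMPLE_INTERVAL after the
+    //    previous accepted sample from the same source.
+    if let Some(last) = last_accepted_monotonic_nanos {
+        if sample.monotonic_nanos - last < MIN_SAMPLE_INTERVAL_NANOS {
+            return false;
+        }
+    }
+    // 2. Reject samples with a UTC earlier than the backstop time.
+    if sample.utc_nanos < backstop_utc_nanos {
+        return false;
+    }
+    // 3. Reject samples whose monotonic time is in the future or more than
+    //    MIN_SAMPLE_INTERVAL in the past.
+    let age = current_monotonic_nanos - sample.monotonic_nanos;
+    age >= 0 && age <= MIN_SAMPLE_INTERVAL_NANOS
+}
+```
+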
+We considered whether to reject time samples that were inconsistent with current
+estimated UTC (either by being before current UTC or significantly after current
+UTC) but conclude this is undesirable. Any system that rejects new inputs based
+on the current estimate is vulnerable to permanent failure if its estimate
+becomes erroneous and in the case of two mismatched inputs there is no reason to
+believe the first is more reliable than the second. We prefer a system that can
+recover from an error, even at the cost of a significant jump in time, to one
+that cannot.
+
+### Which time source should be used? {#source_selection}
+
+As introduced in [UTC architecture](architecture.md), Timekeeper may be
+configured to use four different time source roles although it is unlikely that
+any given product will require more than two.
+
+The primary source, fallback source, and gating source (if these exist) may each
+be used to drive the primary time estimate and the externally visible clock.
+These sources are listed in decreasing order of preference; generally a primary
+time source would be more accurate but less available than a fallback source
+which in turn would be more accurate but less available than a gating source. A
+monitor source is subject to consistency checks against a gating source (if one
+exists) but is otherwise completely independent, driving a separate time
+estimate and an internal userspace clock that can be compared against the
+externally visible clock to assess the performance of the monitor time source.
+
+Time sources may take many minutes or hours to converge on an accurate solution
+so time sources are always launched when Timekeeper is initialized rather than
+only after the failure of some other time source.
+
+Timekeeper selects the time source based on reported status and the presence of
+time samples:
+
+1. The primary time source is used if its most recent status is healthy and its
+   most recent valid sample was within SOURCE_KEEPALIVE;
+2. Otherwise, the fallback time source is used if its most recent status is
+   healthy and its most recent valid sample was within SOURCE_KEEPALIVE;
+3. Otherwise, the gating time source is used if its most recent status is
+   healthy.
+
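+The following Rust sketch illustrates this selection rule. It is illustrative
+only; the source state type and nanosecond units are assumptions made for the
+example.
+
+```rust
+// Illustrative sketch only. Each installed source reports its latest status and
+// the monotonic time (nanoseconds) of its most recent valid sample.
+const SOURCE_KEEPALIVE_NANOS: i64 = 3_600_000_000_000; // 1 hour
+
+#[derive(PartialEq)]
+enum Status {
+    Healthy,
+    Unhealthy,
+}
+
+struct SourceState {
+    status: Status,
+    last_valid_sample_monotonic_nanos: Option<i64>,
+}
+
+/// Returns true if the source is healthy and produced a valid sample recently.
+fn healthy_and_fresh(source: Option<&SourceState>, now_monotonic_nanos: i64) -> bool {
+    match source {
+        Some(s) => {
+            s.status == Status::Healthy
+                && s.last_valid_sample_monotonic_nanos
+                    .map_or(false, |t| now_monotonic_nanos - t <= SOURCE_KEEPALIVE_NANOS)
+        }
+        None => false,
+    }
+}
+
+fn select_source<'a>(
+    primary: Option<&'a SourceState>,
+    fallback: Option<&'a SourceState>,
+    gating: Option<&'a SourceState>,
+    now_monotonic_nanos: i64,
+) -> Option<&'a SourceState> {
+    if healthy_and_fresh(primary, now_monotonic_nanos) {
+        primary
+    } else if healthy_and_fresh(fallback, now_monotonic_nanos) {
+        fallback
+    } else {
+        gating.filter(|s| s.status == Status::Healthy)
+    }
+}
+```
+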
+*Note: As of Q4 2020 gating and fallback sources are not yet supported and
+therefore the time source selection algorithm has not yet been implemented.*
+
+We considered whether to include hysteresis or failure counting in the time
+source selection algorithm but since we intend to accommodate many failure modes
+internal to the time sources we expect the overall health of a time source will
+be reasonably stable. This means hysteresis would not add sufficient value to
+compensate for its additional complexity.
+
+### How should a time sample change the estimated UTC? {#update_estimate}
+
+Each valid sample received from the selected time source should be used to
+update Timekeeper’s estimate of UTC. Each of these samples contains some error
+and the size of this error varies between samples. The UTC estimate must
+therefore combine information from multiple samples rather than blindly
+following the most recent sample.
+
+This state estimation problem is commonly and efficiently solved using a
+[Kalman filter][1] in other domains. We define a simple two dimensional Kalman
+filter to maintain the Timekeeper UTC estimate where the two states are UTC time
+and oscillator frequency. Note that frequency is maintained external to the
+filter through the [frequency correction algorithm](#frequency_estimation);
+filter frequency is excluded from the Kalman filter’s measurement model and has
+a covariance of zero.
+
+The parameters in our filter are presented in Figure 2 below.
+
+![This figure presents the Kalman filter parameters and defines terms used.](images/kalman_filter.png "Figure 2 - Kalman Filter Parameters")
+
+Note that, as a result of the fixed frequency estimate and the sparse process
+covariance matrix, only the upper left element of the state covariance matrix is
+ever non-zero. We apply a minimum bound of MIN_COVARIANCE on this value to
+prevent the filter from over-indexing on its internal state and rejecting new
+inputs.
+
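+Because the frequency state is held fixed and only the UTC variance is
+non-zero, the update effectively reduces to a one dimensional filter on UTC.
+The following Rust sketch illustrates that reduced form only; it is not the
+Timekeeper implementation, and the real filter parameters are those defined in
+Figure 2.
+
+```rust
+// Simplified, illustrative sketch only: with the frequency state held fixed and
+// only the UTC variance non-zero, the update reduces to a one dimensional
+// filter on UTC.
+const OSCILLATOR_ERROR_SIGMA: f64 = 0.000015; // dimensionless, 15 ppm
+const MIN_COVARIANCE: f64 = 1e12; // nanoseconds squared
+
+struct UtcEstimate {
+    utc_nanos: f64,
+    covariance: f64, // nanoseconds squared
+    monotonic_nanos: f64,
+}
+
+fn predict_and_correct(
+    estimate: &mut UtcEstimate,
+    sample_utc_nanos: f64,
+    sample_monotonic_nanos: f64,
+    sample_variance: f64,
+    estimated_frequency: f64,
+) {
+    // Predict: propagate UTC forward over the elapsed monotonic time and grow
+    // the uncertainty by the assumed oscillator error.
+    let dt = sample_monotonic_nanos - estimate.monotonic_nanos;
+    estimate.utc_nanos += dt / estimated_frequency;
+    estimate.covariance += (OSCILLATOR_ERROR_SIGMA * dt).powi(2);
+
+    // Correct: blend in the sample, weighted by the relative uncertainties,
+    // then apply the MIN_COVARIANCE floor.
+    let gain = estimate.covariance / (estimate.covariance + sample_variance);
+    estimate.utc_nanos += gain * (sample_utc_nanos - estimate.utc_nanos);
+    estimate.covariance = ((1.0 - gain) * estimate.covariance).max(MIN_COVARIANCE);
+    estimate.monotonic_nanos = sample_monotonic_nanos;
+}
+```
+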
+### What strategy should be used to converge the reported UTC? {#convergence_strategy}
+
+After each update to the estimated UTC, Timekeeper decides whether to
+immediately step the userspace clock to the new estimate or apply a rate
+correction to gradually slew the userspace clock to the new estimate. These
+options are illustrated in Figure 3 below.
+
+![This figure illustrates stepping and slewing a clock.](images/step_vs_slew.png "Figure 3 - Clock Correction by Step vs by Slew")
+
+Step changes in time can be disruptive for time clients and can lead to errors
+in their calculations so slewing is preferred whenever possible. Slewing is
+constrained to a maximum rate because slewing at a substantially unrealistic
+rate could also induce errors in clients; for example, slewing at double real
+time would be undesirable. Each slew operation is constrained to a maximum
+duration to bound the time over which the clock is known to be inaccurate; for
+example, slewing away an error of multiple hours over a period of two weeks
+would be undesirable.
+
+For small corrections we pick a preferred rate correction at which to perform a
+slew and apply this rate for as long as is necessary to achieve the correction.
+For larger corrections, once the slew duration would reach some maximum, we keep
+the slew duration fixed at this maximum and increase the rate correction. Once
+the rate correction would exceed some maximum we resort to stepping the time.
+
+In summary:
+
+1. If `|estimated_utc - clock_utc| > MAX_RATE_CORRECTION * MAX_SLEW_DURATION`,
+   make a step change to the clock;
+2. Otherwise, if `|estimated_utc - clock_utc| > PREFERRED_RATE_CORRECTION * MAX_SLEW_DURATION`,
+   apply a rate correction of `(estimated_utc - clock_utc)/MAX_SLEW_DURATION`
+   for a duration of `MAX_SLEW_DURATION`;
+3. Otherwise, apply a rate correction of
+   `sign(estimated_utc - clock_utc) * PREFERRED_RATE_CORRECTION` for a duration
+   of `|estimated_utc - clock_utc|/PREFERRED_RATE_CORRECTION`.
+
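+The following Rust sketch illustrates this decision. It is illustrative only;
+the nanosecond units and the `Correction` type are assumptions made for the
+example.
+
+```rust
+// Illustrative sketch only. Errors and durations are in nanoseconds; rates are
+// dimensionless corrections applied on top of the frequency-corrected rate.
+const MAX_RATE_CORRECTION: f64 = 0.0002; // 200 ppm
+const PREFERRED_RATE_CORRECTION: f64 = 0.00002; // 20 ppm
+const MAX_SLEW_DURATION_NANOS: f64 = 5_400_000_000_000.0; // 1.5 hours
+
+enum Correction {
+    Step,
+    Slew { rate: f64, duration_nanos: f64 },
+}
+
+fn choose_correction(estimated_utc_nanos: f64, clock_utc_nanos: f64) -> Correction {
+    let error = estimated_utc_nanos - clock_utc_nanos;
+    if error.abs() > MAX_RATE_CORRECTION * MAX_SLEW_DURATION_NANOS {
+        // 1. Too large to slew away within the maximum duration: step the clock.
+        Correction::Step
+    } else if error.abs() > PREFERRED_RATE_CORRECTION * MAX_SLEW_DURATION_NANOS {
+        // 2. Hold the duration at the maximum and scale the rate to fit.
+        Correction::Slew {
+            rate: error / MAX_SLEW_DURATION_NANOS,
+            duration_nanos: MAX_SLEW_DURATION_NANOS,
+        }
+    } else {
+        // 3. Use the preferred rate and scale the duration to fit.
+        Correction::Slew {
+            rate: error.signum() * PREFERRED_RATE_CORRECTION,
+            duration_nanos: error.abs() / PREFERRED_RATE_CORRECTION,
+        }
+    }
+}
+```
+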
+### How should a sequence of time samples change the estimated frequency? {#frequency_estimation}
+
+A device’s oscillator is likely to have some inherent frequency error due to
+manufacturing imperfections. Estimating the oscillator frequency to account for
+this error can increase the accuracy of the UTC clock or reduce the frequency at
+which clock corrections must be made.
+
+Oscillator errors are bounded by specification to some small value (typically
+tens of parts per million), hence the UTC estimate algorithm above is stable and
+reliable even when a frequency estimate is unavailable; we consider the
+frequency estimate to be an enhancement that is beneficial but not necessary.
+
+The frequency estimate is refined over a much longer time period than the UTC
+estimate. This places the time constants of the two algorithms far apart to
+avoid potential interactions and ensures the frequency algorithm will not track
+transient errors such as a temperature driven error while a device is under
+heavy use. This high time constant means frequency errors would remain in the
+system for a long time and so we prefer making no improvement in the frequency
+estimate to introducing an error in the frequency estimate.
+
+Timekeeper estimates a period frequency for each FREQUENCY_ESTIMATION_WINDOW
+period, provided all of the following conditions are met:
+
+1. At least FREQUENCY_ESTIMATION_MIN_SAMPLES time samples were accepted during
+   the period. This avoids placing an excessive weight on a small number of
+   samples if time was unavailable for most of the period.
+2. No step changes in time occurred during the period. A step change is evidence
+   of a significant error either before or after the step; we exclude these
+   periods to avoid incorporating this error.
+3. No part of the period falls within 12 hours of the UTC time at which a leap
+   second may have occurred. Some time sources indulge in leap second smearing
+   where they introduce a significant frequency error for a 24 hour period
+   rather than a step change. We avoid these windows to avoid incorporating this
+   frequency error. This affects a maximum of two 24 hour periods a year, fewer
+   if the system tracks whether the next leap second is scheduled.
+
+The period frequency is calculated as the gradient of a least squares linear
+regression over the (monotonic, utc) tuples for all time samples accepted in the
+period as illustrated in Figure 4 below.
+
+![This figure illustrates the frequency estimation process.](images/frequency_estimation.png "Figure 4 - Frequency Estimation Process")
+
+This is implemented using the following equation:
+
+```
+period_frequency = {sum(utc * monotonic) - sum(utc)*sum(monotonic)/n}
+                   / {sum(utc^2) - sum(utc)^2/n}
+```
+
+where n is the number of accepted samples within the period. Note that
+MIN_SAMPLE_INTERVAL places an upper bound on this value. The overall frequency
+estimate is calculated as the exponentially weighted moving average (EWMA) of
+the period frequencies. In all cases we constrain the final estimated frequency
+to be within double the standard deviation of the oscillator error, so that no
+combination of events could make the frequency estimate inaccurate enough to
+impact the correctness of the Kalman filter, i.e.
+
+```
+estimated_frequency = clamp(
+    period_frequency * FREQUENCY_ESTIMATION_SMOOTHING +
+       previous_estimated_frequency * (1 - FREQUENCY_ESTIMATION_SMOOTHING),
+    1 - 2 * OSCILLATOR_ERROR_SIGMA,
+    1 + 2 * OSCILLATOR_ERROR_SIGMA)
+```
+
+EWMA provides a simple way to blend data across multiple periods while retaining
+minimal state.
+
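+The following Rust sketch illustrates the period frequency regression and the
+EWMA blend. It is illustrative only; the sample representation is an assumption
+made for the example.
+
+```rust
+// Illustrative sketch only. Samples are (monotonic, utc) pairs, in nanoseconds,
+// accepted during one FREQUENCY_ESTIMATION_WINDOW.
+const FREQUENCY_ESTIMATION_SMOOTHING: f64 = 0.25;
+const OSCILLATOR_ERROR_SIGMA: f64 = 0.000015; // 15 ppm
+
+fn update_frequency_estimate(samples: &[(f64, f64)], previous_estimate: f64) -> f64 {
+    let n = samples.len() as f64;
+    let sum_utc: f64 = samples.iter().map(|(_, utc)| utc).sum();
+    let sum_monotonic: f64 = samples.iter().map(|(monotonic, _)| monotonic).sum();
+    let sum_utc_monotonic: f64 = samples.iter().map(|(monotonic, utc)| monotonic * utc).sum();
+    let sum_utc_squared: f64 = samples.iter().map(|(_, utc)| utc * utc).sum();
+
+    // Gradient of the least squares regression over the period's samples.
+    let period_frequency = (sum_utc_monotonic - sum_utc * sum_monotonic / n)
+        / (sum_utc_squared - sum_utc * sum_utc / n);
+
+    // EWMA blend with the previous estimate, clamped to within two standard
+    // deviations of the nominal oscillator error.
+    let blended = period_frequency * FREQUENCY_ESTIMATION_SMOOTHING
+        + previous_estimate * (1.0 - FREQUENCY_ESTIMATION_SMOOTHING);
+    blended
+        .max(1.0 - 2.0 * OSCILLATOR_ERROR_SIGMA)
+        .min(1.0 + 2.0 * OSCILLATOR_ERROR_SIGMA)
+}
+```
+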
+*Note: As of Q4 2020 the frequency estimation algorithm has not yet been
+implemented.*
+
+### What clock updates should be made? {#clock_updates}
+
+Changes to the rate and offset of the clock follow directly from selecting a
+convergence strategy and estimating the oscillator frequency as discussed in the
+previous two algorithms.
+
+In addition to rate and offset information,
+[`zx_clock_details_v1`](/zircon/system/public/zircon/syscalls/clock.h) also
+contains an error bound field. Clock error is composed of a symmetric error
+driven by uncertainty in the Kalman filter estimate plus an asymmetric error
+driven by the mismatch between the Kalman filter estimate and the reported clock
+time while a slew is in progress.
+
+While a slew is in progress the clock is continually approaching the estimate
+and therefore the error bound is continuously decreasing. To limit resource
+utilization Timekeeper only updates `zx_clock_details_v1.error_bound` when
+either some other clock update is required or when the error in the last
+reported value exceeds ERROR_BOUND_UPDATE. *Note: As of Q4 2020 the error_bound
+field is not yet populated.*
+
+In summary, Timekeeper makes updates to the UTC clock as follows:
+
+1. Step changes to the clock are always implemented as a single clock update.
+2. Clock slews are usually implemented as two clock updates: a rate change to
+   `1/estimated_frequency + rate_correction` when the slew is started followed
+   by a rate change to `1/estimated_frequency` after a delay of `slew_duration`.
+   If a subsequent update is accepted before this second clock change, the
+   second clock change is discarded.
+3. If a new frequency estimate is calculated while no clock slew is in progress,
+   the clock rate is changed to `1/estimated_frequency` (if a clock slew is in
+   progress, the clock update at the end of the slew picks up the new frequency
+   update). If `|last_set_error_bound - error_bound| > ERROR_BOUND_UPDATE`, the
+   error bound is updated.
+
+## Configurable Parameters
+
+The previous sections introduced a number of parameters that may be used to
+configure the behavior of the algorithms. The following tables provide more
+information about each of these parameters and justify the initial value we
+intend to use.
+
+### GATING_THRESHOLD
+
+The gating threshold limits how far time samples from non-gating sources may
+deviate from the UTC indicated by the gating source. This may be used to ensure less
+trusted sources are broadly consistent with a cryptographically verifiable time
+source.
+
+Units | Value | Rationale
+------|-------|-----------
+Nanoseconds | Not Yet Implemented | Not Yet Implemented
+
+### MIN_SAMPLE_INTERVAL
+
+The minimum sample interval bounds the maximum rate at which Timekeeper is
+willing to accept new samples from a time source in order to limit the
+Timekeeper resource utilization. Note this is relevant since in the
+`fuchsia.time.external.PushSource` protocol it is the time source that
+determines when time samples should be generated. This value is also used to
+apply an upper limit on the monotonic age of time samples.
+
+Units | Value | Rationale
+------|-------|-----------
+Nanoseconds | 60,000,000,000 (i.e. 1 minute) | In general we expect time sources to reduce the frequency of time samples as they converge on an accurate time, with time samples in a well calibrated system arriving tens of minutes apart. Accepting a sample every minute is much faster than the rate needed after convergence and roughly reflects the fastest rate we expect shortly after initialization. Processing one time sample per minute per time source would still mean Timekeeper was using a very small fraction of the overall resources and was not frequently spamming the log.
+
+### SOURCE_KEEPALIVE
+
+The source keepalive determines how frequently a source that declares itself to
+be healthy needs to produce samples in order to remain selected. If a time
+source fails to generate any time samples for this period and a source lower in
+the primary > fallback > gating hierarchy is available that other source will be
+used instead. This parameter is not used unless either a fallback or gating time
+source is configured.
+
+Units | Value | Rationale
+------|-------|-----------
+Nanoseconds | 3,600,000,000,000 (i.e. 1 hour) | Time sources should mark themselves as unhealthy when there is some persistent reason they cannot supply time. This parameter should be viewed as a last resort that enables recovery in the event of time source lifecycle bugs. We pick a value that is longer than we expect the slowest time source will use for samples while healthy (and in doing so place a minimum rate requirement on time sources), but short enough that the system clock will not have diverged significantly by the time the error is detected. Over one hour a 25ppm oscillator might have diverged by 90ms, hence we use this as a reasonable initial value.
+
+### OSCILLATOR_ERROR_SIGMA
+
+The standard deviation of the system oscillator frequency error, used to control
+the growth in uncertainty during the prediction phase of the Kalman filter.
+
+Units | Value | Rationale
+------|-------|-----------
+Dimensionless | 0.000015 (i.e. 15 ppm) | Eventually this should be configurable per board to reflect the hardware specification. For now we default to a value that is typical for low end consumer hardware.
+
+### MIN_COVARIANCE
+
+Minimum covariance bounds the minimum uncertainty of the UTC estimate in the
+Kalman filter. This prevents the filter from placing excessive trust in its own state after receiving
+very low uncertainty samples from a time source.
+
+Units | Value | Rationale
+------|-------|-----------
+Nanoseconds squared | 1e12 (i.e. 1e-6 s^2) | This value represents a post-correction standard deviation of one millisecond. This value is lower than the filter achieves naturally with network time sources and therefore does not often come into play. If very high accuracy time sources such as GPS were used in the future it may be appropriate to lower the value somewhat.
+
+### MAX_RATE_CORRECTION
+
+Max rate correction bounds the fastest rate at which Timekeeper will
+deliberately adjust the clock frequency in order to slew away a UTC error. This
+is in addition to any clock frequency adjustment used to compensate for an
+oscillator frequency error.
+
+Units | Value | Rationale
+------|-------|-----------
+Dimensionless | 0.0002 (i.e. 200ppm) | 200ppm represents approximately one order of magnitude above the error rate that could be expected from a typical oscillator error. We believe this magnitude should be accommodated correctly by most clients and it is comfortably within the 1000ppm limit imposed by the kernel. When combined with MAX_SLEW_DURATION this value ensures a 1 second error may be removed by slewing. This is desirable to elegantly handle leap seconds and potential artifacts from time sources that only receive integer seconds.
+
+### MAX_SLEW_DURATION
+
+Max slew duration bounds the longest duration for which Timekeeper will apply a
+clock frequency adjustment in response to a single time sample. Consecutive time
+samples may introduce errors that each trigger a slew and therefore this
+per-sample duration does not constrain the total time over which Timekeeper may
+be in the process of slewing.
+
+Units | Value | Rationale
+------|-------|-----------
+Nanoseconds | 5,400,000,000,000 (i.e. 1.5 hours) | The typical interval between time samples is tens of minutes. A 90 minute maximum implies a correction received in one time sample may result in a slew that spans the next handful of samples (each of these samples could also cause a modification of the slew) which feels appropriate. 90 minutes is long enough to achieve meaningful time corrections but short enough that any user-visible abnormalities associated with a slew or time error are not present over multiple hours. When combined with MAX_RATE_CORRECTION this value ensures a 1 second error may be removed by slewing. This is desirable to elegantly handle leap seconds and potential artifacts from time sources that only receive integer seconds.
+
+### PREFERRED_RATE_CORRECTION
+
+The rate at which Timekeeper adjusts the clock frequency to slew away small
+errors. This is in addition to any clock frequency adjustment used to compensate
+for an oscillator frequency error.
+
+Units | Value | Rationale
+------|-------|-----------
+Dimensionless | 0.00002 (i.e. 20ppm) | 20ppm is consistent with the error that might be observed from a typical oscillator and therefore must be accommodated by all time clients; there is little value in using a lower rate. 20ppm is high enough to correct typical moderate errors in a handful of minutes - a 10ms error would take 50 seconds to remove.
+
+### FREQUENCY_ESTIMATION_WINDOW
+
+The time period over which a set of time samples are collected to update the
+frequency estimate.
+
+Units | Value | Rationale
+------|-------|-----------
+Nanoseconds | 86,400,000,000,000 (i.e. 24 hours) | The value should be a multiple of 24 hours to avoid a bias from different temperatures in the diurnal cycle. Longer periods would lead to more opportunities to exclude or fail to complete a time period and therefore a less reliable frequency estimation process. 24 hours provides a sufficiently large number of samples to average and is sufficiently distant from the time constants in UTC estimation.
+
+### FREQUENCY_ESTIMATION_MIN_SAMPLES
+
+The minimum number of samples that must be received in a frequency estimation
+window for it to be eligible for a frequency estimate.
+
+Units | Value | Rationale
+------|-------|-----------
+Dimensionless | 12 | SOURCE_KEEPALIVE and FREQUENCY_ESTIMATION_WINDOW bound the minimum number of expected time samples to 24 for a device whose time source was healthy for the entire window. We require that half this number be received, meaning a device operating at the slowest legal sample rate must be online for half of the 24 hour window. This value ensures that each time sample has a limited contribution to the final average and therefore a small number of outliers may be accommodated.
+
+
+### FREQUENCY_ESTIMATION_SMOOTHING
+
+The factor applied to the current period during the exponentially weighted
+moving average calculation of frequency.
+
+Units | Value | Rationale
+------|-------|-----------
+Dimensionless | 0.25 | 0.25 sets a moderate (and somewhat arbitrary) bias towards the history during the EWMA calculation, with the historic frequency being weighted three times higher than the current period frequency. This damps the impact of any abnormal frequency periods and encourages a stable long term average.
+
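+In pseudocode terms, the update applied at the end of each window looks roughly
+like the sketch below (an illustration of the weighting only, not the Timekeeper
+implementation):
+
+```c
+// Sketch of the exponentially weighted moving average used for the frequency
+// estimate. The historic estimate keeps a weight of 0.75 and the newest
+// window contributes 0.25, damping the effect of a single abnormal window.
+static const double kFrequencyEstimationSmoothing = 0.25;
+
+static double update_frequency_estimate(double previous_estimate,
+                                         double current_window_frequency) {
+  return (1.0 - kFrequencyEstimationSmoothing) * previous_estimate +
+         kFrequencyEstimationSmoothing * current_window_frequency;
+}
+```
+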
+### ERROR_BOUND_UPDATE
+
+The minimum change in value that causes the error bound in UTC clock details to
+be updated even when no rate or offset changes are required. This parameter
+controls how frequently the clock must be updated while a significant slew is in
+progress.
+
+Units | Value | Rationale
+------|-------|-----------
+Nanoseconds | 200,000,000 (i.e. 200ms) | The maximum rate at which error can be corrected by slewing is determined by MAX_RATE_CORRECTION. An error bound update of 200ms means one error bound correction is required every 16.7 minutes assuming no other time updates have arrived. This seems to be an appropriately low burden to maintain error bounds.
+
+
+[1]: https://www.cs.unc.edu/~welch/media/pdf/kalman_intro.pdf
diff --git a/docs/concepts/time/utc/architecture.md b/docs/concepts/time/utc/architecture.md
new file mode 100644
index 0000000..4f9dfa1
--- /dev/null
+++ b/docs/concepts/time/utc/architecture.md
@@ -0,0 +1,206 @@
+# UTC Architecture
+
+## Introduction
+
+Time synchronization for Fuchsia must be flexible: different products built on
+Fuchsia must be able to use different sources of time and these sources must be
+able to be reconfigured or replaced as these products evolve.
+
+This page defines the basic architecture we use to provide this flexibility: the
+components that are involved, the roles and responsibilities of these
+components, and the interactions between them.
+
+## Architecture
+
+The following diagram illustrates the components involved in time
+synchronization and the basic relationships between them.
+
+![This figure shows the Fuchsia UTC architecture.](images/utc_architecture.png)
+
+### Kernel
+
+The kernel defines the concept of a
+[clock object](/docs/reference/kernel_objects/clock.md) that may be used to
+track the progress of time. Each clock object is a one dimensional affine
+transformation of the clock monotonic reference timeline which may be adjusted
+by a userspace component (the "clock maintainer"), and observed by many other
+userspace components (the “clients”).
+
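+As a simplified illustration of this transformation (the real clock details
+express the rate as a ratio of integers and carry additional fields such as
+error bounds; the struct below is only for intuition):
+
+```c
+#include <stdint.h>
+
+// Simplified sketch of a one-dimensional affine transformation from the
+// monotonic timeline to a synthetic (e.g. UTC) timeline. This is not the
+// kernel's actual representation.
+typedef struct {
+  int64_t reference_offset;  // monotonic time of the last maintainer update, ns
+  int64_t synthetic_offset;  // synthetic (UTC) value at that monotonic time, ns
+  double rate;               // synthetic ns advanced per monotonic ns (~1.0)
+} affine_clock_t;
+
+static int64_t affine_clock_now(const affine_clock_t* clock,
+                                int64_t monotonic_now) {
+  double elapsed = (double)(monotonic_now - clock->reference_offset);
+  return clock->synthetic_offset + (int64_t)(elapsed * clock->rate);
+}
+```
+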
+### Component Manager
+
+Component Manager is responsible for creating the UTC clock and distributing it
+to other userspace components.
+
+Component Manager creates a kernel clock object for UTC time, setting the
+"backstop time" to the time of the last CL included in the build from by reading
+an output of the build process. Note that Component Manager does not start this
+UTC clock.
+
+Component Manager passes a read-only handle to the UTC clock to the component
+instances it starts and exposes the
+[`fuchsia.time.Maintenance`](https://fuchsia.dev/reference/fidl/fuchsia.time#Maintenance)
+service to distribute a read/write handle to the UTC clock. In a production
+system Timekeeper should be the only component with access to this service.
+
+### Time Clients
+
+Time clients are the users of UTC time on a Fuchsia device. Fuchsia’s
+implementation of libc uses the clock handle supplied by Component Manager to
+read UTC time, and therefore any component instance may act as a time client by
+using the standard time API provided by its runtime. See
+[language support](../language_support.md) for further details.
+
+Components that need deeper insight into the state of time synchronization may
+acquire the UTC clock handle and use it to call
+[`zx_clock_get_details`](/docs/reference/syscalls/clock_get_details.md) or wait
+on the `ZX_CLOCK_STARTED` signal.
+
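+For instance, a sketch of a C component that waits for UTC to become available
+and then reads it might look like the following (illustrative only; it assumes
+the process was handed a UTC clock by its parent and that the
+`zx_utc_reference_get` accessor from `<zircon/utc.h>` is available to borrow
+that handle):
+
+```c
+#include <stdio.h>
+#include <zircon/syscalls.h>
+#include <zircon/utc.h>
+
+int main(void) {
+  // Borrow the process-wide UTC clock handle installed by the runtime.
+  zx_handle_t utc_clock = zx_utc_reference_get();
+  if (utc_clock == ZX_HANDLE_INVALID) {
+    // The parent did not provide a UTC clock to this process.
+    return 1;
+  }
+
+  // Block until the clock has been started; until then it sits at the
+  // backstop time.
+  zx_signals_t observed = 0;
+  zx_object_wait_one(utc_clock, ZX_CLOCK_STARTED, ZX_TIME_INFINITE, &observed);
+
+  // Read the current UTC estimate (nanoseconds since the Unix epoch).
+  zx_time_t utc_now = 0;
+  zx_clock_read(utc_clock, &utc_now);
+  printf("UTC: %lld ns since epoch\n", (long long)utc_now);
+  return 0;
+}
+```
+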
+### Timekeeper
+
+Timekeeper is responsible for starting and maintaining the
+UTC clock in line with the sources and policy defined by the product.
+
+Timekeeper connects to the
+[`fuchsia.time.Maintenance`](https://fuchsia.dev/reference/fidl/fuchsia.time#Maintenance)
+service to acquire a writable handle to the UTC clock on launch. It launches and
+connects to the time source component(s) configured by the product and connects
+to
+[`fuchsia.time.external.PushSource`](https://fuchsia.dev/reference/fidl/fuchsia.time.external#PushSource)
+or
+[`fuchsia.time.external.PullSource`](https://fuchsia.dev/reference/fidl/fuchsia.time.external#PullSource)
+to receive time synchronization information from each of these sources. *Note:
+As of Q4 2020 time sources are hardcoded in Timekeeper and cannot yet be
+configured per product. PullSource is not yet supported.*
+
+For a particular product, each time source is configured to play
+one of four different roles:
+
+1. **Primary**: A primary source is used to maintain the UTC clock whenever it is
+   both available and consistent with any gating source (i.e. the time reported
+   by the primary source and gating source are within some upper bound).
+2. **Fallback**: A fallback source is used to maintain the UTC clock when the
+   primary source is unavailable or inconsistent, provided the fallback source
+   is both available and consistent with any gating source.
+3. **Gating**: A gating source is used to provide a validity check on another
+   (usually more accurate but less trusted) time source. The gating source is
+   used to determine whether a primary or fallback source should be used. When
+   neither a primary nor fallback source are available (or when they are
+   available but inconsistent) a gating source may be used to maintain the UTC
+   clock.
+4. **Monitor**: A monitor source is never used to maintain the UTC clock.
+   Metrics are recorded that allow the performance of the monitor source to be
+   assessed, making monitors a way to safely test or "dark launch" new or
+   modified algorithms.
+
+Although this is a flexible system that could support many sources, we do not
+expect most products will require more than two different time sources, with
+Primary-only, Primary+Fallback, Primary+Gating, and Primary+Monitor being the
+common configurations. *Note: As of Q4 2020 only Primary and Monitor sources
+are supported*.
+
+Timekeeper has sole discretion in applying the information from time sources to
+the UTC clock. It may discard updates that have a significant uncertainty or
+that appear to be outliers. When a significant clock correction needs to be
+applied Timekeeper must balance three conflicting desires:
+
+1. Step changes in time should be avoided if possible.
+2. The frequency adjustment used to apply a correction by slewing should not be
+   excessive.
+3. The time taken to apply a correction by slewing should not be excessive.
+
+Timekeeper is the central authority for performance of the hardware oscillator
+on a device. It tracks the observed oscillator error and applies UTC corrections
+to compensate for this error. Timekeeper uses stash to store oscillator error
+across power cycles. *Note: As of Q4 2020 the full frequency correction
+algorithm is not yet implemented*.
+
+On devices that have a real time clock (RTC), Timekeeper is responsible for
+reading the RTC during initialization and periodically updating the RTC to
+reflect the UTC clock.
+
+### Time Sources
+
+Each time source component is responsible for supplying information that
+could be used to synchronize UTC based on one or more remote sources that the
+time source communicates with. Each time source component exposes either the
+[`fuchsia.time.external.PushSource`](https://fuchsia.dev/reference/fidl/fuchsia.time.external#PushSource)
+or
+[`fuchsia.time.external.PullSource`](https://fuchsia.dev/reference/fidl/fuchsia.time.external#PullSource)
+service (or both).
+
+Many time sources communicate with servers over a network using some protocol
+capable of time synchronization, such as NTP, Roughtime, or HTTPS (see
+[HTTPSDate time source](/src/sys/time/httpsdate_time_source)). Some time sources
+could instead communicate with local hardware such as a GPS receiver or VLF
+receiver.
+
+A time source component encapsulates knowledge of the
+protocol used to interact with the remote source and is responsible for
+following the rules of this protocol. This means that where multiple remote
+sources use the same protocol, a single time source component instance is
+usually responsible for communicating with all of them and implementing any
+cross-remote-source requirements in the protocol.
+
+Each time source should be independent of the others, providing flexibility in
+how time sources are installed and avoiding complex failure modes caused by the
+interaction between multiple time sources. This means a time source should never
+use system UTC directly in its implementation (because the system UTC may have
+incorporated inputs from other time sources). Instead, all time sources use the
+system monotonic clock for their reference time.
+
+## Interfaces
+
+### Time Source to Timekeeper
+
+Time sources supply time updates to Timekeeper.
+
+Each time update is expressed as a correspondence pair between the monotonic
+clock at the time the update was most valid and the UTC determined by the
+time source. Sending updates as a correspondence pair rather than an absolute
+time means the FIDL latency between the time source and Timekeeper does not
+directly impact accuracy, and clearly communicates the monotonic time at which
+the update was most valid. In addition to the correspondence pair, the time
+source also sends a standard deviation to convey the uncertainty associated
+with the update.
+
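+For intuition, a received sample can be projected onto any later monotonic time
+as sketched below (the field and function names here are invented for
+illustration and are not the `fuchsia.time.external` FIDL definitions):
+
+```c
+#include <stdint.h>
+
+// Hypothetical representation of a time update, for illustration only.
+typedef struct {
+  int64_t utc;                 // UTC when the sample was most valid, ns
+  int64_t monotonic;           // monotonic time at that same instant, ns
+  int64_t standard_deviation;  // reported uncertainty, ns
+} time_sample_t;
+
+// Because the sample is a correspondence pair, any FIDL delivery latency does
+// not bias the estimate: the receiver can project it forward itself.
+static int64_t utc_estimate_at(const time_sample_t* sample,
+                               int64_t monotonic_now) {
+  return sample->utc + (monotonic_now - sample->monotonic);
+}
+```
+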
+Time sources may support two different modes of operation:
+
+* In **push mode** the time source determines when a time update should be
+  generated and autonomously sends these updates to Timekeeper.
+* In **pull mode** Timekeeper determines when a time update should be generated
+  and requests these updates from the time source.
+
+Push mode is preferred in nearly all circumstances since the time source has
+better knowledge of timing constraints in the protocol, remote resource
+utilization, and the availability of any dependencies, letting it make more
+appropriate decisions. Pull mode may be appropriate for trivial time sources or
+when a time source is used very infrequently (e.g. every few hours) as a gating
+source. When pull mode is used a time source may reject time update requests,
+for example if a request would violate a maximum rate constraint in the
+protocol. For time sources that support both modes, Timekeeper determines which
+mode to use and connects to the corresponding service. A time source may choose
+to generate updates less frequently or not at all when no connections are open
+to these services.
+
+In push mode Timekeeper has no knowledge of when a time update should occur, so
+it cannot infer a failure from the absence of a successful update: in push mode
+a time source also provides its overall health state, letting Timekeeper
+make better choices about when to switch time sources.
+
+### Timekeeper to Time Source
+
+Timekeeper supplies global information that may aid a time source in generating
+time updates, including:
+
+* The frequency tolerance of the oscillator.
+* The observed oscillator frequency.
+
+Providing these data ensures time sources do not need to duplicate Timekeeper
+functionality. *As of Q4 2020 no time sources have required these data so the
+API has not yet been defined*.
+
+Timekeeper does not provide information about the global synchronization state
+of the system, or about whether updates from a particular source will actually
+be used to maintain the system clock. This intentional decision avoids creating
+feedback loops between time sources and ensures that a time source will behave
+consistently across roles.
diff --git a/docs/concepts/time/utc/behavior.md b/docs/concepts/time/utc/behavior.md
new file mode 100644
index 0000000..f7b40fc
--- /dev/null
+++ b/docs/concepts/time/utc/behavior.md
@@ -0,0 +1,25 @@
+# UTC Behavior
+
+TODO([65784](https://fxbug.dev/65784)): Populate this page to document the
+behavior that users of the UTC clock can expect, including the following points
+as a minimum:
+
+* How to get a clock handle.
+* Clock handle is a process variable, if the parent didn’t supply it it won’t be
+  available.
+* Under no circumstances will the clock ever be earlier than backstop.
+* Until UTC is available the clock will remain fixed at the backstop time.
+* ZX_CLOCK_STARTED may be waited on to learn when time is available.
+* The clock may step either forward or backward and there is no upper limit on
+  the size of this step.
+* Steps should be rare on most devices, except for the initial step from
+  backstop time to the accurate time when the clock is first synchronized.
+* The clock may run at a different rate than monotonic to compensate for
+  oscillator errors or to gradually remove small errors, but this rate
+  difference should never exceed a few hundred ppm in either direction.
+* How to get clock details.
+* Definition of error bound in clock details (when we agree this).
+* Accuracy will vary from product to product but should be accurate to within a
+  few hundred milliseconds on most devices most of the time.
+* What secure time means (when we agree this).
+* How to tell when time is secure (when we agree this).
diff --git a/docs/concepts/time/utc/images/algorithm_block_diagram.png b/docs/concepts/time/utc/images/algorithm_block_diagram.png
new file mode 100644
index 0000000..1a9376d
--- /dev/null
+++ b/docs/concepts/time/utc/images/algorithm_block_diagram.png
Binary files differ
diff --git a/docs/concepts/time/utc/images/frequency_estimation.png b/docs/concepts/time/utc/images/frequency_estimation.png
new file mode 100644
index 0000000..5368989
--- /dev/null
+++ b/docs/concepts/time/utc/images/frequency_estimation.png
Binary files differ
diff --git a/docs/concepts/time/utc/images/kalman_filter.png b/docs/concepts/time/utc/images/kalman_filter.png
new file mode 100644
index 0000000..32fc01f
--- /dev/null
+++ b/docs/concepts/time/utc/images/kalman_filter.png
Binary files differ
diff --git a/docs/concepts/time/utc/images/step_vs_slew.png b/docs/concepts/time/utc/images/step_vs_slew.png
new file mode 100644
index 0000000..b51ac46
--- /dev/null
+++ b/docs/concepts/time/utc/images/step_vs_slew.png
Binary files differ
diff --git a/docs/concepts/time/utc/images/utc_architecture.png b/docs/concepts/time/utc/images/utc_architecture.png
new file mode 100644
index 0000000..72f4847
--- /dev/null
+++ b/docs/concepts/time/utc/images/utc_architecture.png
Binary files differ
diff --git a/docs/concepts/time/utc/overview.md b/docs/concepts/time/utc/overview.md
new file mode 100644
index 0000000..b7ce025
--- /dev/null
+++ b/docs/concepts/time/utc/overview.md
@@ -0,0 +1,32 @@
+# UTC Time
+
+UTC time is the system’s best estimate of [Coordinated Universal Time][1].
+
+[Architecture](architecture.md) covers the overall architecture of the time
+synchronization system including the roles and responsibilities of each
+component involved.
+
+On boards with a Real Time Clock (RTC), the system initializes UTC time from the
+RTC shortly after power on. On all boards, the system periodically updates UTC
+from one or more network time sources once a network is available. In principle,
+these time sources could be configured separately for each product, but
+currently all products source UTC from google.com using
+[HTTPSdate](/src/sys/time/httpsdate_time_source/README.md).
+
+[Algorithms](algorithms.md) covers the time synchronization algorithms
+implemented by the central component in the time synchronization system,
+[Timekeeper](/src/sys/time/timekeeper).
+
+UTC is distributed to components using a
+[kernel clock object](/docs/reference/kernel_objects/clock.md), with each
+process holding a handle to this clock object (providing the parent process
+supplied this clock). If you are developing components that use UTC, you should
+read [UTC behavior](behavior.md) to learn how to acquire UTC time and understand
+the behavior the UTC clock may exhibit both before and after time
+synchronization.
+
+Previously the kernel maintained an additional internal UTC clock that could be
+accessed through [`zx_clock_get`](/docs/reference/syscalls/clock_get.md). This
+clock is deprecated and should no longer be used.
+
+[1]: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
\ No newline at end of file
diff --git a/docs/contribute/_toc.yaml b/docs/contribute/_toc.yaml
index cb12c28..3e51d25 100644
--- a/docs/contribute/_toc.yaml
+++ b/docs/contribute/_toc.yaml
@@ -8,16 +8,19 @@
 toc:
 - title: "Contributing to Fuchsia"
   path: /CONTRIBUTING.md
+- title: "Fuchsia roadmap"
+  path: /docs/contribute/roadmap.md
 - title: "Code of conduct"
   path: /CODE_OF_CONDUCT.md
+- title: "Respectful code"
+  path: /docs/contribute/respectful_code.md
 - title: "Contributing to Zircon"
   path: /docs/contribute/contributing_to_zircon.md
 - title: "Contributing to FIDL"
   section:
   - include: /docs/contribute/contributing-to-fidl/_toc.yaml
-- title: "Best practices"
-  section:
-  - include: /docs/contribute/best-practices/_toc.yaml
 - title: "Documentation"
   section:
   - include: /docs/contribute/docs/_toc.yaml
+- title: "Report an issue"
+  path: /docs/contribute/report-issue.md
diff --git a/docs/contribute/best-practices/_toc.yaml b/docs/contribute/best-practices/_toc.yaml
deleted file mode 100644
index a1f435b..0000000
--- a/docs/contribute/best-practices/_toc.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright 2020 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Please, read https://fuchsia.dev/fuchsia-src/community/contribute/docs/documentation_navigation_toc
-# before making changes to this file, and add a member of the fuchsia.dev
-# team as reviewer.
-toc:
-- title: "Respectful code"
-  path: /docs/contribute/best-practices/respectful_code.md
-- title: "Update channel usage policy"
-  path: /docs/contribute/best-practices/update_channel_usage_policy.md
diff --git a/docs/contribute/community/_toc.yaml b/docs/contribute/community/_toc.yaml
new file mode 100644
index 0000000..6f2eae90
--- /dev/null
+++ b/docs/contribute/community/_toc.yaml
@@ -0,0 +1,14 @@
+# Copyright 2020 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Please, read https://fuchsia.dev/fuchsia-src/community/contribute/docs/documentation_navigation_toc
+# before making changes to this file, and add a member of the fuchsia.dev
+# team as reviewer.
+toc:
+- title: "Overview"
+  path: /docs/contribute/community/index.md
+- title: "Get involved"
+  path: /docs/contribute/community/get-involved.md
+- title: "Contributor roles"
+  path: /docs/contribute/community/contributor-roles.md
diff --git a/docs/contribute/community/contributor-roles.md b/docs/contribute/community/contributor-roles.md
new file mode 100644
index 0000000..4d3d99f
--- /dev/null
+++ b/docs/contribute/community/contributor-roles.md
@@ -0,0 +1,397 @@
+# Fuchsia contributor roles
+
+## Overview {:#overview}
+
+This document defines the roles associated with contributing to
+the Fuchsia project.
+
+## Principles {:#principles}
+
+Roles within the Fuchsia project seek to embody the following principles:
+
+*   _Transparency._ We are transparent and open about roles and requirements.
+*   _Inclusivity._ Fuchsia lets anyone contribute to the project, regardless of
+    their employer. We believe contributions from a diverse, open-source
+    community are critical to improving Fuchsia.
+*   _Responsibility._ Roles and privileges can be revoked if a person no
+    longer meets the requirements.
+
+## Roles {:#roles}
+
+The following are the contributor roles associated with the Fuchsia project:
+
+*   [Member](#member)
+*   [Committer](#committer)
+*   [Owner](#owner)
+*   [Global Approver](#global-approver)
+
+### Member {:#member}
+
+Anyone who contributes to the project by providing patches to code or
+documentation, and agrees to the Google Developers' [Contributor License Agreement](https://cla.developers.google.com/){:.external}.
+
+#### Responsibilities {:#responsibilities}
+
+Members are responsible for acting in accordance with
+the [Fuchsia Code of Conduct](/CODE_OF_CONDUCT.md).
+
+#### Become a Member {:#become-a-member}
+
+To become a Member you must do the following:
+
+*   Sign the Google Developers' [Contributor License Agreements](https://cla.developers.google.com/){:.external}.
+*   Acknowledge the [Fuchsia Code of Conduct](/CODE_OF_CONDUCT.md).
+
+### Committer {:#committer}
+
+A Committer is a person who has write access to the
+[Fuchsia repository](https://fuchsia.googlesource.com/){:.external}. A Committer can submit
+their own Gerrit changes or Gerrit changes from any other member.
+
+A Committer is not just someone who can make changes, but also someone who
+has demonstrated the ability to collaborate effectively with other Members of
+Fuchsia community. Example collaboration activities include but are not limited
+to:
+
+*   Seeking out the most knowledgeable people to review their code changes.
+*   Contributing high-quality, well-tested code.
+*   Fixing bugs in code or tests.
+
+Members can become Committers with different kinds of contributions. For
+instance, those working on documentation or toolchain can meet the requirements
+to become Committers by contributing high-quality documentation or configuration
+changes, which would not meet the “traditional” bar for well-tested code.
+
+In order to submit Gerrit changes, Committers need to either be [Owners](#owner)
+of the affected files or receive approval from an Owner of the affected files.
+
+#### Responsibilities {:#responsibilities}
+
+Committers are responsible for the following:
+
+*   Ensuring that the code submitted to Fuchsia by Committers is tested
+according to the [Testability Rubrics](/docs/concepts/testing/testability_rubric.md).
+*   Ensuring that the code submitted to Fuchsia by Committers follows testing
+best practices.
+
+#### Become a Committer {:#become-a-committer}
+
+To become a Committer you must do the following:
+
+*   Contribute 10 non-trivial patches to the project, demonstrating the ability
+to write high-quality, well-tested code.
+*   Be nominated by a current Committer.
+*   Obtain reviews and approvals of those 10 non-trivial patches from at least
+2 different Committers.
+*   Ensure that your nomination is supported by 3 other Committers.
+*   Ensure that your nomination is not blocked by any Committer.
+
+Committer nominations are evaluated within seven business days of the initial
+nomination request.
+
+### Owner {:#owner}
+
+An Owner is responsible for files or directories within the Fuchsia project and
+has comprehensive knowledge of the code in that subtree. Owners are listed in
+`OWNERS` files. For directories or files that are outside of an Owner's
+responsibility, that Owner has the same privileges as a Committer.
+
+#### Responsibilities {:#responsibilities}
+
+In addition to the responsibilities of a Committer and Member, Owners
+are responsible for the following:
+
+*   Nominating other Owners.
+*   Approving or removing other Owners.
+*   Providing high-quality reviews and design feedback.
+*   Approving changes for code in their subtree.
+
+#### Become an Owner {:#become-an-owner}
+
+To become an Owner you must do the following:
+
+*   Be a [Committer](#become-a-committer).
+*   Submit a substantial number of non-trivial changes to the affected subtree.
+*   Provide high-quality reviews and code design feedback.
+*   Provide code reviews in a timely manner.
+*   Self-nominate or be nominated by another Committer.
+    *   To self-nominate, [submit a Gerrit change](/docs/development/source_code/contribute_changes.md)
+    that adds yourself to the `OWNERS` file of your desired repository.
+    Current Owners will evaluate your change and either accept or reject your
+    request.
+
+### Global Approver {:#global-approver}
+
+A Global Approver is an Owner in the [root `OWNERS` file](/OWNERS){:.external}.
+A Global Approver often makes large-scale changes that affect the entire Fuchsia
+codebase. For example, Global Approvers are people who tend to maintain
+various languages, toolchains, and other build system components.
+
+For the full set of Global Approver expectations as well as the list of current
+Global Approvers, see [the root `OWNERS` file](/OWNERS){:.external}.
+
+While Global Approvers are empowered to provide a [Code-Review +2](https://gerrit-review.googlesource.com/Documentation/config-labels.html){:.external}
+to large-scale changes, Global Approvers are not expected to have comprehensive
+knowledge of the entire Fuchsia codebase.
+
+#### Responsibilities {:#responsibilities}
+
+In addition to the responsibilities of a Member, Committer, and Owner, Global
+Approvers are responsible for the following:
+
+*   Approving large scale changes within the Fuchsia codebase with a +2
+    in Gerrit.
+*   Providing timely reviews for large scale changes.
+
+#### Become a Global Approver {:#become-a-global-approver}
+
+To become a Global Approver you must do the following:
+
+*   Demonstrate considerable proficiency in making large-scale changes across
+    the entire Fuchsia codebase.
+*   Self-nominate or get nominated by another Committer.
+    *  To self-nominate, do the following:
+        * [Submit a Gerrit change](/docs/development/source_code/contribute_changes.md)
+          that adds yourself to the [root `OWNERS` file](/OWNERS){:.external}.
+          Current Owners will evaluate your change and either accept or reject your
+          request.
+        * Email all [existing Global Approvers](/OWNERS){:.external}
+          with your associated Gerrit change and wait one business day for discussion
+          and approval. If you are being nominated, existing Global Approvers will be
+          emailed by the individual nominating you.
+
+## Code review actions {:#code-review-actions}
+
+The types of code review actions you can provide depend on your role within
+the Fuchsia project.
+
+### Initiate a CQ Dry Run {:#initiate-a-cq-dry-run}
+
+A CQ Dry Run runs your change against the available tests in the Commit Queue.
+Committers, Owners, and Global Approvers can initiate a CQ Dry Run.
+
+### Score code reviews  {:#score-code-reviews}
+
+#### Code Review {:#code-review}
+
+After you request a code review, reviewers can score your change.
+
+Reviewers can label your change with a score of **-2, -1, 0, +1,** **or +2**.
+For more information on review label definitions, see [Gerrit Code Review - Review Labels](https://gerrit-review.googlesource.com/Documentation/config-labels.html){:.external}.
+
+Committers, Owners, and Global Approvers can score code reviews but only a
+Global Approver or repository Owner can provide a **+2**.
+
+### Submit approved changes {:#submit-approved-changes}
+
+You need a **Code Review Label +2** to submit your change. A
+**Code-Review Label +2** score can only be applied by a repository Owner or
+Global Approver.
+
+When a change is submitted, the change is moved to the Commit Queue (CQ).
+The Commit Queue verifies, commits, and merges changes to the master branch.
+
+## Role matrix {:#role-matrix}
+
+This table summarizes the actions that each Fuchsia contributor role can
+perform.
+
+<table>
+  <tr>
+   <td><strong>Role</strong>
+   </td>
+   <td><strong>Create Change</strong>
+   </td>
+   <td><strong>Code-Review another Committer’s change</strong>
+   </td>
+   <td><strong>Provide Code-Review +2</strong>
+   </td>
+   <td><strong>Provide CQ+1 (dry run of CQ)</strong>
+   </td>
+   <td><strong>Submit Approved Change to CQ</strong>
+   </td>
+   <td><strong>Add or remove Owners</strong>
+   </td>
+  </tr>
+  <tr>
+   <td>Member
+   </td>
+   <td>Yes
+   </td>
+   <td><strong>No</strong>
+   </td>
+   <td><strong>No</strong>
+   </td>
+   <td><strong>No</strong>
+   </td>
+   <td><strong>No</strong>
+   </td>
+   <td><strong>No</strong>
+   </td>
+  </tr>
+  <tr>
+   <td>Committer
+   </td>
+   <td>Yes
+   </td>
+   <td>Yes
+   </td>
+   <td><strong>No</strong>
+   </td>
+   <td>Yes
+   </td>
+   <td>Yes
+   </td>
+   <td><strong>No</strong>
+   </td>
+  </tr>
+  <tr>
+   <td>Owner (outside owned subtree)
+   </td>
+   <td>Yes
+   </td>
+   <td>Yes
+   </td>
+   <td><strong>No</strong>
+   </td>
+   <td>Yes
+   </td>
+   <td>Yes
+   </td>
+   <td><strong>No</strong>
+   </td>
+  </tr>
+  <tr>
+   <td>Owner (in own subtree)
+   </td>
+   <td>Yes
+   </td>
+   <td>Yes
+   </td>
+   <td>Yes
+   </td>
+   <td>Yes
+   </td>
+   <td>Yes
+   </td>
+   <td>Yes
+   </td>
+  </tr>
+  <tr>
+   <td>Global Approver
+   </td>
+   <td>Yes
+   </td>
+   <td>Yes
+   </td>
+   <td>Yes
+   </td>
+   <td>Yes
+   </td>
+   <td>Yes
+   </td>
+   <td>Yes
+   </td>
+  </tr>
+</table>
+
+## Life of a change {:#life-of-a-change}
+
+The following diagram depicts the high-level stages of what happens to a change
+after it's pushed to Gerrit.
+
+![This diagram shows the approval process for a change after it is pushed to Gerrit.](/docs/contribute/community/images/change-resolution.png "Change approval process")
+
+## Specialized roles {:#specialized-roles}
+
+Areas within the Fuchsia repository may have their own unique requirements,
+defining their own sets of roles and responsibilities, in addition to the ones
+detailed above.
+
+### API Reviewer {:#api-reviewer}
+
+An API Reviewer is accountable for the quality and long-term
+health of the [Fuchsia API Surface](/docs/glossary.md#fuchsia-api-surface).
+API Reviewers collectively form the API
+Council.
+
+Any change that modifies the Fuchsia API Surface must receive an **API-Review+1**
+from a member of the API Council in addition to the usual **Code-Review+2**.
+
+For more details about the responsibilities of an API Reviewer and how the API
+Council operates, see the [API Council Charter](/docs/contribute/governance/api_council.md).
+
+#### API Reviewer membership {:#api-reviewer-membership}
+
+To become an API Reviewer you must do the following:
+
+*   Be a [Committer](#committer).
+*   Demonstrate good judgement about the quality and long-term health of APIs.
+*   Be appointed by the functional area of the Fuchsia project, as per the [API Council Charter](/docs/contribute/governance/api_council.md#membership).
+
+### Eng Council member {:#eng-council-member}
+
+The Fuchsia Eng Council is a small group of senior technical leaders responsible
+for providing a coherent technical vision for Fuchsia. The Eng Council largely
+operates by delegation and ratification, communicating engineering standards,
+values, and objectives throughout the community and then reviewing and ratifying
+concrete engineering proposals from project contributors.
+
+#### Eng Council membership {:#eng-council-membership}
+
+There is no predetermined number of people on the Eng Council. However, in order
+to provide a coherent technical vision, the council has a small number of
+members. Eng Council members are appointed by the governing authority for the
+project.
+
+For more details about the responsibilities of an Eng Council member and how the
+Eng Council operates, see the [Fuchsia Eng Council Charter](/docs/contribute/governance/eng_council.md).
+
+## Revoking Privileges {:#revoking-privileges}
+
+When contributors no longer meet requirements, their role and
+corresponding privileges can be revoked.
+
+### Scenarios {:#scenarios}
+
+Example scenarios for having privileges revoked include, but are not limited to,
+the following:
+
+*   Not acting in accordance with the [Fuchsia Code of Conduct](/CODE_OF_CONDUCT.md).
+*   Committers repeatedly ignoring testability best-practices in their code
+    reviews.
+*   Owners discouraging people from requesting code reviews.
+*   Owners being unresponsive to review requests.
+
+### Process {:#process}
+
+The process for revoking an individual’s role within the Fuchsia project
+involves the following steps:
+
+*   An Owner makes a recommendation to `community-managers@fuchsia.dev` to
+    revoke someone’s role, specifying the rationale. Revoking an Owner role
+    needs to be approved by an Owner in the same subtree
+    or above.
+    * Ownership is often revoked when an Owner is no longer actively
+      contributing to their associated files or directories.
+
+Revoking a Committer role should be a rare action and requires approval by the
+governance authority. Community managers should be involved in the process of
+revoking the Committer role.
+
+## Frequently asked questions {:#frequently-asked-questions}
+
+As a Fuchsia Member, you might have the following questions about requesting a
+code review:
+
+*   Who can provide a **Code Review +1**?
+    * All Committers, Owners, and Global Approvers. Code Review +1 means
+    “Looks Good To Me” but a +1 alone doesn’t allow for submission.
+    Someone else has to approve the change with a +2. For more information on
+    review label definitions, see [Gerrit Code Review - Review Labels](https://gerrit-review.googlesource.com/Documentation/config-labels.html){:.external}.
+*   Can specific portions of the Fuchsia source code have different requirements?
+    * Yes. For example, API changes have special requirements as described in
+     the [Fuchsia API Council Charter](/docs/contribute/governance/api_council.md#api_review).
+*   Do I need **API-Review +1**?
+    * Changes affecting the Fuchsia API surface require **API-Review +1**, and the
+    code review tool will only show the API-Review flag when it is needed.
diff --git a/docs/contribute/community/get-involved.md b/docs/contribute/community/get-involved.md
new file mode 100644
index 0000000..cbc9976c
--- /dev/null
+++ b/docs/contribute/community/get-involved.md
@@ -0,0 +1,258 @@
+# Get involved with Fuchsia
+
+This document describes the different ways you can get involved
+and stay informed about Fuchsia.
+
+## Review the Fuchsia roadmap
+
+The Fuchsia project prioritizes features and enhancements that support our
+development community. Read the [Fuchsia roadmap](/docs/contribute/roadmap.md)
+to get a high-level understanding of the various projects being considered
+within Fuchsia.
+
+## Understand the Request for Comments (RFC) process
+
+The Requests for Comments (RFC) process is intended to provide a consistent and
+transparent path for making project-wide technical decisions.
+
+RFCs are documents that inform and build on the technical direction of the
+Fuchsia project. RFCs can be proposed by anyone in the community. Using an RFC,
+a Fuchsia community member can submit an idea or technical concept for broader
+review or consideration. For more information, see [Fuchsia RFCs](/docs/contribute/governance/rfcs).
+
+### Review completed RFCs
+
+To see a complete list of accepted and rejected RFC proposals, see [Accepted proposals](/docs/contribute/governance/rfcs#accepted)
+and [Rejected proposals](/docs/contribute/governance/rfcs#rejected).
+
+### Track ongoing RFCs
+
+To stay up-to-date with RFCs that are still in progress, you can bookmark this [saved search](https://fuchsia-review.googlesource.com/q/dir:docs/contribute/governance/rfcs+is:open)
+that lists any open Gerrit change in the `rfcs` directory.
+
+### Create an RFC
+
+If you want to create a proposal that you want the Fuchsia Eng Council to
+review, see [Creating an RFC.](/docs/contribute/governance/rfcs/create_rfc.md)
+
+## Join a mailing list
+
+Stay informed about the Fuchsia project by joining mailing lists.
+
+### General discussion
+
+The following are general Fuchsia mailing lists:
+
+
+<table>
+  <tr>
+   <td><strong>Mailing list</strong>
+   </td>
+   <td><strong>Purpose</strong>
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/announce">announce</a>
+   </td>
+   <td>A read-only list for announcements about Fuchsia.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/discuss">discuss</a>
+   </td>
+   <td>For general conversation about Fuchsia.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/testability-discuss">testability-discuss</a>
+   </td>
+   <td>Discussion list for general conversation about testable code and Fuchsia’s testability rubrics.
+   </td>
+  </tr>
+</table>
+
+### Topic discussion
+
+The following mailing lists are discussion channels for specific areas of
+Fuchsia:
+
+<table>
+  <tr>
+   <td><strong>Mailing list</strong>
+   </td>
+   <td><strong>Purpose</strong>
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/a11y-dev">a11y-dev</a>
+   </td>
+   <td>For conversation about accessibility on Fuchsia.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/audio-dev">audio-dev</a>
+   </td>
+   <td>For conversation about audio on Fuchsia.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/camera-dev">camera-dev</a>
+   </td>
+   <td>Discussions about the development of <a href="/reference/fidl/fuchsia.camera/index.md">Fuchsia's camera APIs</a>.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/component-framework-dev">component-framework-dev</a>
+   </td>
+   <td>For conversation about <a href="/docs/glossary.md#component-framework">Component Framework</a>.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/connectivity-dev">connectivity-dev</a>
+   </td>
+   <td>For conversation about connectivity and networking on Fuchsia.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/drivers-dev">drivers-dev</a>
+   </td>
+   <td>For conversation about <a href="/docs/glossary.md#driver">Drivers</a>.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/fidl-dev">fidl-dev</a>
+   </td>
+   <td>For conversation about <a href="/docs/glossary.md#fidl">FIDL</a>.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/graphics-dev">graphics-dev</a>
+   </td>
+   <td>For conversation about graphics on Fuchsia.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/intl-dev">intl-dev</a>
+   </td>
+   <td>For conversation about internationalization and localization on Fuchsia.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/media-dev">media-dev</a>
+   </td>
+   <td>For conversation about media and playback on Fuchsia.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/pkg-dev">pkg-dev</a>
+   </td>
+   <td>For conversation about <a href="/docs/glossary.md#fuchsia-package">Fuchsia Packages</a>.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/process-runtime-dev">process-runtime-dev</a>
+   </td>
+   <td>Working group for low-level runtime work shared between the <a href="/docs/glossary.md#zircon">Zircon</a>, Core Platform, and Toolchain teams.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/sdk-dev">sdk-dev</a>
+   </td>
+   <td>For conversation about the <a href="/docs/glossary.md#fuchsia-idk">Fuchsia Integrator Development Kit (IDK)</a>, formerly called the Fuchsia SDK.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/security-dev">security-dev</a>
+   </td>
+   <td>For conversation about Fuchsia’s security architecture.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/session-framework-dev">session-framework-dev</a>
+   </td>
+   <td>For conversation about <a href="/docs/glossary.md#session-framework">Session Framework</a>.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/storage-dev">storage-dev</a>
+   </td>
+   <td>For conversation about filesystems and other storage concerns on Fuchsia.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/toolchain-dev">toolchain-dev</a>
+   </td>
+   <td>For conversation about Fuchsia’s C/C++, Go, and Rust toolchain support.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/rust-users">rust-users</a>
+   </td>
+   <td>Discussion list specifically for Rust toolchain support in Fuchsia.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/ui-input-dev">ui-input-dev</a>
+   </td>
+   <td>Discussion list for those working on user input events.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/zircon-dev">zircon-dev</a>
+   </td>
+   <td>For conversation about <a href="/docs/glossary.md#zircon">Zircon</a>.
+   </td>
+  </tr>
+</table>
+
+### Platform governance and management
+
+The following mailing lists are discussion channels for communicating with
+Fuchsia’s leadership:
+
+<table>
+  <tr>
+   <td><strong>Mailing list</strong>
+   </td>
+   <td><strong>Purpose</strong>
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/api-council">api-council</a>
+   </td>
+   <td>Discussion list for communicating with the <a href="/docs/contribute/governance/api_council.md">Fuchsia API Council</a>.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/community-managers">community-managers</a>
+   </td>
+   <td>Inbound-only discussion list for contacting Fuchsia’s Community Managers with concerns or questions.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/eng-council">eng-council</a>
+   </td>
+   <td>Discussion list for communicating with the Fuchsia <a href="/docs/contribute/governance/eng_council.md">Eng Council</a> about review requests, <a href="/docs/contribute/governance/rfcs.md">RFC</a> feedback, or RFC escalations.
+   </td>
+  </tr>
+  <tr>
+   <td><a href="https://groups.google.com/a/fuchsia.dev/g/eng-council-discuss">eng-council-discuss</a>
+   </td>
+   <td>Discussion list for communicating with the Fuchsia <a href="/docs/contribute/governance/eng_council.md">Eng Council</a>.
+   </td>
+  </tr>
+</table>
+
+## Contribute code
+
+The Fuchsia project encourages well-tested, high-quality contributions from
+anyone who wants to contribute to Fuchsia, not just from Googlers.
+
+To learn more about how to contribute a code change to Fuchsia, see [Contribute changes](/docs/development/source_code/contribute_changes.md).
+
+
+## Report an issue
+
+Did you see a line of documentation or code that you think needs to be improved?
+You can help get these kinds of concerns resolved by filing an issue in Monorail.
+For more information, see [Report an Issue](/docs/contribute/report-issue.md).
diff --git a/docs/contribute/community/images/change-resolution.png b/docs/contribute/community/images/change-resolution.png
new file mode 100644
index 0000000..624f9e9
--- /dev/null
+++ b/docs/contribute/community/images/change-resolution.png
Binary files differ
diff --git a/docs/contribute/community/index.md b/docs/contribute/community/index.md
new file mode 100644
index 0000000..28d7e40
--- /dev/null
+++ b/docs/contribute/community/index.md
@@ -0,0 +1,6 @@
+# Community
+
+This section details resources for the Fuchsia community, specifically:
+
+* [Get involved](/docs/contribute/community/get-involved.md)
+* [Contributor roles](/docs/contribute/community/contributor-roles.md)
diff --git a/docs/contribute/contributing-to-fidl/README.md b/docs/contribute/contributing-to-fidl/README.md
index 07e9c83..4f07c76 100644
--- a/docs/contribute/contributing-to-fidl/README.md
+++ b/docs/contribute/contributing-to-fidl/README.md
@@ -71,7 +71,7 @@
 
 Source compatibility tests are used to test FIDL's
 [source compatibility guarantees][abi-api-compat]. They are found in
-[/src/tests/fidl/source_compatibility] and [/topaz/tests/fidl-changes].
+[/src/tests/fidl/source_compatibility].
 
 ##### Compatibility
 
@@ -160,22 +160,24 @@
 
 #### References to other CLs
 
-To reference another CL in a commit message, always use the `Change-ID`.
+To reference another Gerrit change in a commit message, always use
+the `Change-ID`.
 
 Using the `Change-ID` is preferred since:
 
-* The git sha is only known after a change lands, and while guidance could be
-  given to use the `Change-Id` in one case, and the git sha in the other, we
-  prefer a uniform guidance.
-* The CL number is assigned by Gerrit, and is not part of the persistent history
-  of the repository. Should we change review mechanism, the `Change-Id` will
-  continue to be part of the recorded history, whereas CL numbers will not.
-  There are also rare occurrences where CL numbers may be lost, e.g. due to
-  re-indexing issues.
+* The git SHA is only known after a change is merged, and while guidance
+  could be given to use the `Change-Id` in one case, and the git SHA in the
+  other, we prefer a uniform guidance.
+* The link to the change is assigned by Gerrit, and is not part of the
+  persistent history of the repository. Should we change the review mechanism,
+  the `Change-Id` will continue to be part of the recorded history, whereas
+  the change's number will not. There are also rare occurrences where change
+  numbers may be lost, e.g. due to re-indexing issues.
 
-For instance, to refer to the CL which added [FTP-042] we should use
-`I32b966810d21a249647887fa45b61720ad01714c`, and not the git sha
-`5d40ee8c42d1b0e4d8b690786da12a0a947c1aaa`, nor the CL number `fxr/284569`.
+For instance, to refer to the change that added [FTP-042] we should use
+`I32b966810d21a249647887fa45b61720ad01714c`, and not the git SHA
+`5d40ee8c42d1b0e4d8b690786da12a0a947c1aaa` or the link to the
+change, https://fuchsia-review.googlesource.com/c/fuchsia/+/284569.
 
 #### Multi-step change
 
@@ -729,7 +731,6 @@
 [/src/tests/benchmarks/fidl]: /src/tests/benchmarks/fidl
 
 [/src/tests/fidl/source_compatibility]: /src/tests/fidl/source_compatibility
-[/topaz/tests/fidl-changes]: /topaz/tests/fidl-changes
 
 [/src/tests/fidl/compatibility/]: /src/tests/fidl/compatibility/
 [/src/tests/fidl/dangerous_identifiers]: /src/tests/fidl/dangerous_identifiers
diff --git a/docs/contribute/contributing-to-fidl/fidl-tests-and-gn.md b/docs/contribute/contributing-to-fidl/fidl-tests-and-gn.md
index e01535b..e55123d 100644
--- a/docs/contribute/contributing-to-fidl/fidl-tests-and-gn.md
+++ b/docs/contribute/contributing-to-fidl/fidl-tests-and-gn.md
@@ -120,8 +120,8 @@
 
 We can now run the test by package name (`fx test fidl-foo-tests`) or by
 component name (`fx test fidl_foo_tests`). For single-test packages like this,
-**use the component name in documentation** (e.g. contributing_to_fidl.md,
-`"Test:"` lines in commit messages).
+**use the component name in documentation** (e.g.
+[Contributing to FIDL][contributing], `"Test:"` lines in commit messages).
 
 For multiple device tests, collect them all in a **single package** with
 `fuchsia_test_package`. For example, suppose we split `fidl_foo_tests` into
@@ -365,7 +365,7 @@
 This automatically creates a `baz_test` target that builds a `baz_lib_test`
 binary. **Do not use this**, for two reasons:
 
-1.  The [naming guidelines](#naming) require a `_tests` suffix, not `-test`.
+1.  The [naming guidelines](#naming) require a `_tests` suffix, not `_test`.
 2.  It will be
     [deprecated](https://fuchsia.googlesource.com/fuchsia/+/9d9f092f2b30598c3929bd30d0058d4e052bb0f4/build/rust/rustc_library.gni#91)
     soon.
@@ -424,3 +424,4 @@
 [package_names]: /docs/concepts/packages/package_url.md#package_identity
 [source_code_layout]: /docs/concepts/source_code/layout.md
 [building_components]: /docs/development/components/build.md
+[contributing]: /docs/contribute/contributing-to-fidl
diff --git a/docs/contribute/docs/code-sample-style-guide.md b/docs/contribute/docs/code-sample-style-guide.md
index 94a219e..5b5e353 100644
--- a/docs/contribute/docs/code-sample-style-guide.md
+++ b/docs/contribute/docs/code-sample-style-guide.md
@@ -1,6 +1,6 @@
 # Code sample style guidelines {:#overview}
 
-This document describes how to incorportate code samples in documentation, 
+This document describes how to incorporate code samples in documentation,
 and specific style guidelines for code samples. This includes:
 
   *  [Code sample best practices](#code-sample-best-practices)
@@ -16,7 +16,7 @@
 ## Code sample best practices {:#code-sample-best-practices}
 
 When creating a code sample for a part of Fuchsia that you are deeply familiar with,
-consider how a new user would read the sample and try to anticipate their needs. 
+consider how a new user would read the sample and try to anticipate their needs.
 Think about the process from end-to-end and include prerequisite steps to completing the process
 and specify what success looks like.
 
diff --git a/docs/contribute/docs/documentation-navigation-toc.md b/docs/contribute/docs/documentation-navigation-toc.md
index 59659c5..6babe67 100644
--- a/docs/contribute/docs/documentation-navigation-toc.md
+++ b/docs/contribute/docs/documentation-navigation-toc.md
@@ -72,7 +72,7 @@
   An expandable section is an expandable group of multiple entries in a table
   of contents. For example, see the expandable sections, such as Networking
   and Graphics, in the
-  [Concepts section](/docs/concepts/README.md). Each expandable
+  [Concepts section](/docs/concepts/index.md). Each expandable
   section has an arrow to the left of the section name.
 
   You can create a group of entries with a `section` element. Each section must
diff --git a/docs/contribute/docs/documentation-standards.md b/docs/contribute/docs/documentation-standards.md
index c5a09e5..91c557bd 100644
--- a/docs/contribute/docs/documentation-standards.md
+++ b/docs/contribute/docs/documentation-standards.md
@@ -44,7 +44,7 @@
     - Guides - Task-oriented documentation
 - [Conceptual](documentation-types.md#conceptual-documentation) - Foundational
   documentation focused on teaching more about Fuchsia, Fuchsia architecture, and Fuchsia components
-- [Reference](documentation-types.md#reference-documentation) - Documentation focused on 
+- [Reference](documentation-types.md#reference-documentation) - Documentation focused on
   detailing the syntax and parameters of Fuchsia APIs and tools. This documentation is usually
   auto-generated.
 
@@ -90,7 +90,7 @@
   complicated words when you describe something. It's ok to use contractions like "it's" or
   "don't".
 
-- **Be respectful** Follow the guidelines set forth in [Respectful Code](/docs/contribute/best-practices/respectful_code.md).
+- **Be respectful** Follow the guidelines set forth in [Respectful Code](/docs/contribute/respectful_code.md).
 
 - **Write in second-person ("you")**: Fuchsia documentation is written to the user ("you"). When
   For example, "You can install Fuchsia by doing the following...". Do not refer to the reader in the
@@ -100,7 +100,7 @@
 - **Write in present tense.** Always document the system as it is, not as it will be. Words such
   as "will" are very ambiguous. For example "you will see" leads to questions like "when will I see
   this?". In 1 minute or in 20 minutes? In addition, do not refer to future product features unless
-  necessary.  Mentioning future plans that might not happen becomes a maintenence burden. 
+  necessary.  Mentioning future plans that might not happen becomes a maintenance burden.
 
 - **Keep sentences short and concrete.** Using punctuation allows your reader to follow
   instructions or and understand concepts. Also, short sentences are much easier
@@ -129,9 +129,9 @@
   the [glossary](/docs/glossary.md).
 
 - **Avoid coloquial phrases or regional idioms** Keep in mind that a lot of users are non-native
-  English speakers interested in Fucshia. Avoid difficult to translate idioms, like 
+  English speakers interested in Fuchsia. Avoid difficult to translate idioms, like
   "that's the way the cookie crumbles". While it might make sense to you, it doesn't translate
-  well into other languages. 
+  well into other languages.
 
 - **Avoid referencing proprietary information.** This can refer to any potential terminology or
   product names that may be trademarked or any internal information (API keys, machine names, etc…)
diff --git a/docs/contribute/governance/README.md b/docs/contribute/governance/README.md
deleted file mode 100644
index b482565..0000000
--- a/docs/contribute/governance/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Governance
-
-This section describes various policies and proposals that affect the Fuchsia
-project:
-
-* [Request for comments](/docs/contribute/governance/rfcs/0001_rfc_process.md)
-* [FIDL language tuning proposals](/docs/contribute/governance/fidl/README.md)
-* [Fuchsia programming language policy](/docs/contribute/governance/policy/programming_languages.md)
-* [Importing external dependencies](/docs/contribute/governance/policy/external_dependencies.md)
-
diff --git a/docs/contribute/governance/_toc.yaml b/docs/contribute/governance/_toc.yaml
index ed38a85..05a61d6 100644
--- a/docs/contribute/governance/_toc.yaml
+++ b/docs/contribute/governance/_toc.yaml
@@ -7,11 +7,17 @@
 # team as reviewer.
 toc:
 - title: "Overview"
-  path: /docs/contribute/governance/README.md
+  path: /docs/contribute/governance/index.md
+- title: "Governance"
+  path: /docs/contribute/governance/governance.md
 - title: "Eng Council"
   path: /docs/contribute/governance/eng_council.md
-- title: "API Council"
-  path: /docs/contribute/governance/api_council.md
+- title: "API"
+  section:
+  - title: "API Council"
+    path: /docs/contribute/governance/api_council.md
+  - title: "API Design Document template"
+    path: /docs/contribute/governance/api-design-template.md
 - title: "Project policy"
   section:
   - include: /docs/contribute/governance/policy/_toc.yaml
diff --git a/docs/contribute/governance/api-design-template.md b/docs/contribute/governance/api-design-template.md
new file mode 100644
index 0000000..ba79817
--- /dev/null
+++ b/docs/contribute/governance/api-design-template.md
@@ -0,0 +1,166 @@
+# Fuchsia API Design Document template
+
+This template details the sections that should be included in your API design
+document as well as the questions that should be answered by your API Design
+Document.
+
+## Summary
+
+A one paragraph summary of your change to the Fuchsia API.
+
+## Goals and use cases
+
+Your API design document is expected to answer the following questions
+regarding your API's use cases:
+
+  + What problem does this API or API feature solve?
+
+  + What would users of your API be able to accomplish?
+
+This section acknowledges that there is more than one solution that could
+resolve the problems your API is intended to fix. Construct your "Use cases"
+section in a way that doesn’t presuppose that the design proposed by your
+document is the only correct way to solve those use cases.
+
+## Design
+
+This section contains the technical details of your API design.
+
+This section contains the following:
+
+  + A high-level description of your approach, including:
+    + The key choices that you’ve made in your design.
+    + The actual code definition of the API, such as the FIDL definition of an
+      interface.
+
+  + A Gerrit change link that contains the code for your API:
+
+    + Your API design should conform to the [API readability rubric](/docs/concepts/api/README.md)
+      for the language that you use to write the API. The interface should be
+      fully documented at this stage but it does not need to be implemented.
+
+  + An explanation of the choices behind your API design and why you’ve
+    made those design choices.
+  + An explanation of how your API might evolve in the future.
+
+## Unknowns
+
+This section answers the following questions regarding
+your design's assumptions:
+
+  + What information are you missing that might help improve the design?
+  + Are there areas of your design that could be improved?
+  + Which parts of your design are you most interested in receiving feedback on
+    and why?
+
+## Usability
+
+This section answers the following questions regarding the usability of your API:
+
+  + Are the semantics of your API intuitive from its signature(s)?
+  + Have you designed the appropriate extension points to allow for the future
+    evolution of your API?
+  + Does your API behave similarly to other Fuchsia APIs that do similar things?
+  + How does your API behave compared to similar APIs for other platforms?
+
+A good framework for thinking through the usability of your API is to
+write example programs that use your API. That exercise gets you thinking about
+how users experience your API and helps you discover any potential
+drawbacks of your design.
+
+If you find your API difficult to use while writing these examples, consider
+revising your API to improve its usability. Your users are end-developers.
+They should be key stakeholders when you consider how to design your API.
+
+## Testing
+
+This section answers the following questions regarding
+your API's approach to testing:
+
+  + How do you plan to test your API?
+    + You might have unit tests for your implementation, but you
+      might also want a medium-size test that exercises your implementation
+      through your API.
+      + Consider using `lib/component/cpp/testing`.
+  + If developers were to rely on your API feature, how would they test their
+    code?
+    + Consider providing a mock implementation of your API that clients
+      can use for testing purposes.
+
+## Performance considerations
+
+There is often a tension between performance and usability. The performance
+considerations for an API often vary by the frequency with which the API is
+used. This section should describe the choices that you’ve made to balance these
+concerns. Consider consulting the [API readability rubric](/docs/concepts/api/README.md)
+for language-specific guidance about how to balance these concerns.
+
+This section answers the following questions regarding how
+your API design affects performance:
+
+  + Does your API involve a large number of round-trips across a
+    process or thread boundary?
+  + Does your API involve blocking on a remote process or thread?
+  + Does your API involve copying large amounts of data?
+  + How many queries per second (QPS) do you expect your API to receive?
+  + How much data do you expect a typical query to transport?
+
+## Security considerations
+
+This section answers the following questions regarding
+how your API design considers security:
+
+  + Does your API expose security-sensitive information?
+  + Does your API let its users manipulate security sensitive resources?
+  + Are the users of your API isolated from each other?
+  + Does your API respect an object-capability discipline?
+  + Does your API encourage your users to use your API securely?
+    + Does your API encourage time-of-check to time-of-use (TOCTOU)
+      vulnerabilities?
+    + Does your API clearly separate any control planes from any data planes?
+
+If your API has non-trivial security considerations, you should consult
+with the security team and go through a formal security review. If this is the
+case, contact the [API council](https://groups.google.com/a/fuchsia.dev/g/api-council) about requesting a security review.
+
+When a security review is performed, provide a link to your security
+review in this section.
+
+## Privacy considerations
+
+This section answers the following questions regarding
+how your API design considers privacy:
+
+  + Does your API expose privacy-sensitive information?
+  + Does your API involve any personally identifiable information?
+  + Does your API involve any device identifiers?
+  + Does your API provide users control over how information is shared?
+
+If your API has non-trivial privacy considerations, go through a formal privacy
+review. When a privacy review is performed, provide a link to your privacy
+review in this section.
+
+## Drawbacks and alternatives
+
+Your API design document is expected to answer the following questions
+regarding how you've considered drawbacks as well as alternative
+implementations:
+
+  + Are there any disadvantages to your API design?
+  + What alternative designs did you consider?
+    + Why aren't you using these alternatives?
+    + Are there trade-offs or scenarios where these alternative designs may
+      be appropriate?
+
+## Submit your API Design Document
+
+To submit your API Design Document, do the following:
+
+1. Duplicate this markdown file.
+2. Edit the contents of the duplicate markdown file to answer the questions in this template.
+3. Save your markdown file with a hyphenated name of your choosing.
+4. Submit your API Design Document markdown file by following the steps in [Contribute changes](/docs/development/source_code/contribute_changes.md).
+
+After your Design Document has been submitted, it is reviewed by the API
+Council. For more information on the API Design Document review process, see the
+[Fuchsia API Council Charter](/docs/contribute/governance/api_council.md).
diff --git a/docs/contribute/governance/api_council.md b/docs/contribute/governance/api_council.md
index 8f6895c..62d4b6d 100644
--- a/docs/contribute/governance/api_council.md
+++ b/docs/contribute/governance/api_council.md
@@ -194,8 +194,8 @@
 council member responsible for that area, but any council member can approve the
 change if the responsible council member is unavailable.
 
-Before being merged, every CL that modifies the Fuchsia API Surface must receive
-an API-Review+1 from a member of [api-council@fuchsia.com][api-council-group] in
+Before being merged, every change that modifies the Fuchsia API Surface must receive
+an API-Review+1 from a member of [api-council@fuchsia.dev][api-council-group] in
 addition to the usual Code-Review+2. The same person can provide both
 API-Review+1 and Code-Review+2 for a given change, but someone cannot give their
 own CLs API-Review+1. See [Review Labels][review-labels] for documentation about
@@ -316,8 +316,7 @@
 <!-- Reference links -->
 
 [api-council-group]: https://groups.google.com/a/fuchsia.com/forum/#!forum/api-council
-<!-- TODO(fxb/62922): Publish the Fuchsia API Design Template -->
-[api-design-template]: http://go.corp.google.com/fuchsia-api-design-template
+[api-design-template]: /docs/contribute/governance/api-design-template.md
 [eng-council]: /docs/contribute/governance/eng_council.md
 [review-labels]: https://gerrit-review.googlesource.com/Documentation/config-labels.html
 [rough-consensus]: https://en.wikipedia.org/wiki/Rough_consensus
diff --git a/docs/contribute/governance/fidl/ftp/ftp-003.md b/docs/contribute/governance/fidl/ftp/ftp-003.md
index 4d01d07..23d01bf 100644
--- a/docs/contribute/governance/fidl/ftp/ftp-003.md
+++ b/docs/contribute/governance/fidl/ftp/ftp-003.md
@@ -41,7 +41,7 @@
   C++, and Dart bindings.
   There is no support for Go, C, or Rust.
 
-For example (from [//zircon/tools/fidl/examples/types.test.fidl][example-types]):
+For example (from [//zircon/system/host/fidl/examples/types.test.fidl][example-types]):
 
 ```fidl
 struct default_values {
@@ -254,5 +254,5 @@
 <!-- xref -->
 
 [grammar]: /docs/reference/fidl/language/grammar.md
-[example-types]: /zircon/tools/fidl/examples/types.test.fidl#45
+[example-types]: https://fuchsia.googlesource.com/fuchsia/+/1d98ab5e39255f8305825a18cd385198d6517569/zircon/system/host/fidl/examples/types.test.fidl#45
 [proto3-defaults]: https://developers.google.com/protocol-buffers/docs/proto3#default
diff --git a/docs/contribute/governance/governance.md b/docs/contribute/governance/governance.md
new file mode 100644
index 0000000..eb137e9
--- /dev/null
+++ b/docs/contribute/governance/governance.md
@@ -0,0 +1,45 @@
+# Fuchsia Governance
+
+Fuchsia is an open source project with the goal of creating a production-grade
+operating system that is secure, updatable, inclusive, and pragmatic.
+Learn more about [Fuchsia’s core principles](/docs/concepts/index.md).
+
+Google steers the direction of Fuchsia and makes platform decisions related to
+Fuchsia. While Googlers contribute substantially to Fuchsia’s code base, the
+Fuchsia project encourages contributions from anyone, not just from Googlers.
+
+## Become a contributor
+
+Fuchsia lets anyone contribute to the project, regardless of their employer.
+The Fuchsia project reviews and encourages well-tested, high-quality
+contributions from anyone who wants to contribute to Fuchsia.
+
+To help facilitate code reviews, Fuchsia has the following contribution
+guidelines and community resources:
+
+  * [Contribute changes](/docs/development/source_code/contribute_changes.md)
+  * [Fuchsia Code of Conduct](/CODE_OF_CONDUCT.md)
+  * [Fuchsia contributor roles](/docs/contribute/community/contributor-roles.md)
+
+Contributions to Fuchsia must be accompanied by a Google Contributor License
+Agreement. For more information on the Contributor License
+Agreement, see [Contributing to Fuchsia](/CONTRIBUTING.md#contributor_license_agreement).
+
+## Meet the Fuchsia Eng Council
+
+The [Fuchsia Eng Council](/docs/contribute/governance/eng_council.md)
+is a small group of senior technical leaders responsible for providing a
+coherent technical vision for Fuchsia. Members of the
+Fuchsia Eng Council are listed in the [Fuchsia Eng Council Charter](/docs/contribute/governance/eng_council.md#current_members).
+
+The Eng Council is responsible for maintaining the platform roadmap and approving
+or rejecting Fuchsia Requests for Comments (RFCs) using the [Fuchsia RFC Process](/docs/contribute/governance/rfcs/README.md).
+
+The Fuchsia RFC process is intended to provide a consistent and transparent
+path for making project-wide, technical decisions. For example, the RFC process
+can be used to evolve the project roadmap and the system architecture.
+
+## Review the Fuchsia Open Source Licensing Policies
+
+Fuchsia is subject to multiple licenses, for more information,
+see [Fuchsia Open Source Licensing Policies](/docs/contribute/governance/policy/open-source-licensing-policies.md).
diff --git a/docs/contribute/governance/index.md b/docs/contribute/governance/index.md
new file mode 100644
index 0000000..2d7e01c
--- /dev/null
+++ b/docs/contribute/governance/index.md
@@ -0,0 +1,13 @@
+# Overview
+
+This section describes various policies and proposals that affect the Fuchsia
+project:
+
+* [Governance](/docs/contribute/governance/governance.md)
+* [Request for comments](/docs/contribute/governance/rfcs/0001_rfc_process.md)
+* [FIDL language tuning proposals](/docs/contribute/governance/fidl/README.md)
+* [Fuchsia programming language policy](/docs/contribute/governance/policy/programming_languages.md)
+* [Importing external dependencies](/docs/contribute/governance/policy/external_dependencies.md)
diff --git a/docs/contribute/governance/policy/_toc.yaml b/docs/contribute/governance/policy/_toc.yaml
index ca1a1cb..9dfffb9 100644
--- a/docs/contribute/governance/policy/_toc.yaml
+++ b/docs/contribute/governance/policy/_toc.yaml
@@ -6,7 +6,13 @@
 # before making changes to this file, and add a member of the fuchsia.dev
 # team as reviewer.
 toc:
+- title: "Fuchsia Open Source Licensing Policies"
+  path: /docs/contribute/governance/policy/open-source-licensing-policies.md
+- title: "Open Source Review Board (OSRB) process"
+  path: /docs/contribute/governance/policy/osrb-process.md
 - title: "External dependencies"
   path: /docs/contribute/governance/policy/external_dependencies.md
 - title: "Programming languages"
   path: /docs/contribute/governance/policy/programming_languages.md
+- title: "Update channel usage policy"
+  path: /docs/contribute/governance/policy/update_channel_usage_policy.md
\ No newline at end of file
diff --git a/docs/contribute/governance/policy/open-source-licensing-policies.md b/docs/contribute/governance/policy/open-source-licensing-policies.md
new file mode 100644
index 0000000..0e6ffb3
--- /dev/null
+++ b/docs/contribute/governance/policy/open-source-licensing-policies.md
@@ -0,0 +1,209 @@
+# Fuchsia Open Source Licensing Policies
+
+## Fuchsia Project code
+
+All Fuchsia Project code is hosted on [https://fuchsia.googlesource.com/](https://fuchsia.googlesource.com/).
+Fuchsia Project file headers will list `Copyright <year> The Fuchsia Authors`.
+
+## Licenses
+
+Fuchsia is subject to multiple licenses:
+
+  *   The Fuchsia kernel is released under the following MIT-style license: [/zircon/kernel/LICENSE](/zircon/kernel/LICENSE).
+  *   All Fuchsia user space components are released under a BSD-style license: [/LICENSE](/LICENSE)
+      or an Apache 2.0 license: [https://fuchsia.googlesource.com/infra/+/master/LICENSE](https://fuchsia.googlesource.com/infra/+/master/LICENSE).
+  *   All code that is BSD-licensed has an additional IP grant: [/PATENTS](/PATENTS).
+
+Any code that has a different copyright holder or that is released under a
+different license is considered external code per this policy and must adhere
+to the external code policies in this document.
+
+## External code
+
+All external code hosted on [https://fuchsia.googlesource.com/](https://fuchsia.googlesource.com/)
+must be open source code
+released under a license from the [approved licenses list](#approved-licenses).
+All external code must be hosted in a repository whose name is prefixed with
+`third_party` or within a directory named `third_party` within one of Fuchsia’s
+existing repositories. If the code is hosted in its own repository, it must be
+mapped to a path including a directory named `third_party` when checked out as
+part of the Fuchsia Platform source tree. The set of licenses permitted for a
+particular portion of code depends on the usage of that code - see below for a
+list of approved licenses for [production targets](#production-target)
+and [development targets](#development-target).
+
+All code used in a Fuchsia build must be hosted on a Gerrit server run by
+Google. In almost all cases, the code must be hosted on [https://fuchsia.googlesource.com/](https://fuchsia.googlesource.com/).
+
+All source code must be downloaded when running `jiri update`. No build steps
+may download additional source code. Code from package management systems, such
+as packages from Dart’s Pub or crates from Rust’s Cargo, must be vendored into
+[https://fuchsia.googlesource.com/](https://fuchsia.googlesource.com/) and
+comply with the same license requirements as any
+other code in the project.
+
+## Licenses and tracking
+
+Refer to [What is a License?](https://opensource.google/docs/using/license/){:.external}
+for an explanation of what an open source license is and why it is important.
+All projects hosted on [https://fuchsia.googlesource.com/](https://fuchsia.googlesource.com/) must be released under an
+approved license and contain the full license text alongside the code.
+Simply stating the license by reference (for example, “BSD3”) is not sufficient;
+the actual text must be included. In most cases, the project should be an entire
+repository and the license text should be at the top level of the repository
+in a file named LICENSE, COPYRIGHT, or similar. In rare cases where the Fuchsia
+project needs to host multiple logical projects in a single repository, for
+example in Fuchsia’s Dart pub vendor repository, each project must be in its
+own directory with the license text for that project, and a top-level file in
+the repository must explain the set of licenses used by the subdirectories.
+
+To facilitate tracking, in addition to the license text, each project containing
+external code must include a README file with information about the
+project. The README must be named `README.fuchsia` and contain at least the
+following information:
+
+```
+Name: common name for the project
+
+URL: web site of upstream project
+
+LICENSE: short description of license
+```
+
+It’s also recommended, but not required, that the `README.fuchsia` files describe
+what version of the upstream project is being used and what kinds of
+modifications, if any, were made to port to Fuchsia. The short description of
+the license should be a [Software Package Data Exchange (SPDX) license identifier](https://spdx.org/licenses/){:.external}
+that matches the license, but more text can be included in cases where more
+elaboration on the license is required. Fuchsia Project code - that is, code
+for which The Fuchsia Authors hold the copyright and that is released under
+the Fuchsia project’s standard license - does not require this file.
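+
+For illustration only, a hypothetical `README.fuchsia` for an imported project
+might look like the following (the project name, URL, and the two trailing
+fields are made up for this example; only `Name`, `URL`, and `LICENSE` are
+required by this policy):
+
+```
+Name: libexample
+
+URL: https://example.org/libexample
+
+LICENSE: BSD-2-Clause
+
+Upstream version: 1.4.2
+
+Local modifications: GN build files added; no source changes.
+```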
+
+## Categories of code and allowed licenses
+
+### Production target {:#production-target}
+
+In this document, “production target” is defined as a production Fuchsia-based
+device in use by an end user. This section applies to all code that runs on a
+production target, including the kernel, drivers, system services, frameworks,
+and programs, regardless of how they are deployed. Code is permitted on this
+target only if it is released under one of the following licenses, and under
+that license only. If a license carries additional restrictions, such as an IP
+grant or another additional clause, then approval of the license alone does not
+suffice for that portion of code.
+
+#### Approved licenses {:#approved-licenses}
+
+The following are the approved licenses for production target devices:
+
+  *   BSD 3-clause license, specifically the text at [/LICENSE](/LICENSE)
+
+  *   BSD 2-clause license, specifically the text at [https://opensource.org/licenses/BSD-2-Clause](https://opensource.org/licenses/BSD-2-Clause)
+
+  *   MIT license, specifically the text at [/zircon/kernel/LICENSE](/zircon/kernel/LICENSE)
+
+  *   Zlib license, specifically the text at [https://fuchsia.googlesource.com/third\_party/zlib/+/master/README#85](https://fuchsia.googlesource.com/third\_party/zlib/+/master/README#85)
+
+  *   Libpng license, specifically the text at [https://fuchsia.googlesource.com/third\_party/libpng/+/master/LICENSE](https://fuchsia.googlesource.com/third\_party/libpng/+/master/LICENSE)
+
+  *   Boost license 1.0, specifically the text at [https://fuchsia.googlesource.com/third\_party/asio/+/master/asio/LICENSE\_1\_0.txt](https://fuchsia.googlesource.com/third\_party/asio/+/master/asio/LICENSE\_1\_0.txt)
+
+  *   OpenSSL license, specifically the text at [https://fuchsia.googlesource.com/third\_party/boringssl/+/upstream/master/LICENSE](https://fuchsia.googlesource.com/third\_party/boringssl/+/upstream/master/LICENSE)
+
+  *   FreeType project license, specifically the text at [https://fuchsia.googlesource.com/third\_party/freetype2/+/master/docs/FTL.TXT](https://fuchsia.googlesource.com/third\_party/freetype2/+/master/docs/FTL.TXT)
+
+  *   Apache 2.0 license, specifically the text at [https://fuchsia.googlesource.com/third\_party/flatbuffers/+/master/LICENSE.txt](https://fuchsia.googlesource.com/third\_party/flatbuffers/+/master/LICENSE.txt)
+
+  *   Independent JPEG Group License (IJG), specifically the text at [https://fuchsia.googlesource.com/third\_party/iccjpeg/+/master/LICENSE](https://fuchsia.googlesource.com/third\_party/iccjpeg/+/master/LICENSE)
+
+  *   ICU license, specifically the text at [https://fuchsia.googlesource.com/third\_party/icu/+/master/LICENSE](https://fuchsia.googlesource.com/third\_party/icu/+/master/LICENSE)
+
+  *   Curl license, specifically the text at [https://fuchsia.googlesource.com/third\_party/curl/+/master/COPYING](https://fuchsia.googlesource.com/third\_party/curl/+/master/COPYING)
+
+  *   University of Illinois / NCSA Open Source License (NCSA), specifically the text at [https://fuchsia.googlesource.com/third\_party/clang/+/master/LICENSE.TXT](https://fuchsia.googlesource.com/third\_party/clang/+/master/LICENSE.TXT)
+
+  *   ISC license, specifically the text at [https://fuchsia.googlesource.com/third\_party/boringssl/+/upstream/master/LICENSE#143](https://fuchsia.googlesource.com/third\_party/boringssl/+/upstream/master/LICENSE#143)
+
+  *   IBM-Pibs license, specifically the text at [https://github.com/u-boot/u-boot/blob/master/Licenses/ibm-pibs.txt](https://github.com/u-boot/u-boot/blob/master/Licenses/ibm-pibs.txt)
+
+  *   R8a779x\_usb3 license, specifically the text at [https://github.com/u-boot/u-boot/blob/master/Licenses/r8a779x\_usb3.txt](https://github.com/u-boot/u-boot/blob/master/Licenses/r8a779x\_usb3.txt)
+
+  *   Creative Commons Attribution 3.0 Unported license at [https://creativecommons.org/licenses/by/3.0/legalcode](https://creativecommons.org/licenses/by/3.0/legalcode)
+
+  *   Creative Commons Attribution 4.0 International license at [https://creativecommons.org/licenses/by/4.0/legalcode](https://creativecommons.org/licenses/by/4.0/legalcode)
+
+Code not under one of these licenses and not explicitly granted an exemption by
+Fuchsia’s Open Source Review Board (OSRB) is not permitted in the production
+target.
+
+Licenses in the [restricted](https://opensource.google/docs/thirdparty/licenses/#restricted){:.external}
+or [reciprocal](https://opensource.google/docs/thirdparty/licenses/#reciprocal){:.external}
+categories will not be approved for use in Fuchsia.
+
+#### Specific exceptions
+
+The following repositories have been granted specific exemptions for production
+target devices:
+
+  *   [https://fuchsia.googlesource.com/third\_party/llvm/](https://fuchsia.googlesource.com/third\_party/llvm/)
+  *   [https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/LICENCE.iwlwifi\_firmware](https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/LICENCE.iwlwifi\_firmware)
+
+These exemptions apply only to these specific repositories; they do not apply to
+anything else, no matter how similar it may seem.
+
+To request an exemption, contact the [Open Source Review Board (OSRB)](/docs/contribute/governance/policy/osrb-process.md).
+
+### Development target {:#development-target}
+
+In this document, “development target” is defined as a non-production
+Fuchsia-based device in use by a Fuchsia developer rather than an end user.
+This section applies to all code that is used by developers building things
+for Fuchsia, including tools, debuggers, utilities, and examples. All licenses
+permitted for production targets are also permitted for the development target.
+
+#### Approved licenses
+
+Additionally, the following licenses are permitted for the development target:
+
+  * GNU General Public License v2.0 (GPL 2.0), specifically the text at [https://fuchsia.googlesource.com/third\_party/gdb/+/master/COPYING](https://fuchsia.googlesource.com/third\_party/gdb/+/master/COPYING)
+
+  * GNU Library General Public License 2.0 (LGPL 2.0), specifically the text at [https://spdx.org/licenses/LGPL-2.0.html#licenseText](https://spdx.org/licenses/LGPL-2.0.html#licenseText)
+
+  * GNU Lesser General Public License 2.1 (LGPL 2.1), specifically the text at [https://spdx.org/licenses/LGPL-2.1.html#licenseText](https://spdx.org/licenses/LGPL-2.1.html#licenseText)
+
+  * Open Font License 1.1 (OFL 1.1), specifically the text at [https://github.com/u-boot/u-boot/blob/master/Licenses/OFL.txt](https://github.com/u-boot/u-boot/blob/master/Licenses/OFL.txt)
+
+#### Hosting development artifacts
+
+To host an artifact (a binary or tarball) on
+[Google storage](https://cloud.google.com/storage/){:.external} for development
+purposes, you must do the following:
+
+*   Verify all transitive dependencies are under approved licenses.
+*   Verify that the exact source of all dependencies is hosted on [https://fuchsia.googlesource.com/](https://fuchsia.googlesource.com/).
+    *   If some components are hosted elsewhere, contact the [Open Source Review Board (OSRB)](/docs/contribute/governance/policy/osrb-process.md)
+        to check that the hosting arrangement satisfies the requirements of
+        the Fuchsia project.
+*   Produce a file containing the license text of the binary and of all
+    transitive dependencies. Serve this file with the
+    artifact (for example, in the tarball).
+
+## Modifying external code
+
+The process for modifying external code is the same as for modifying Fuchsia
+project code. Be sure to keep the appropriate `README.fuchsia` files up to date
+with a high-level description of changes from upstream. Do not modify any
+existing copyright notice or license file when changing external code.
+
+## Support contacts
+
+### Add new external code
+
+For information on adding new external code, see
+[Open Source Review Board (OSRB) Process](/docs/contribute/governance/policy/osrb-process.md).
+
+### Questions
+
+If you have a question about Fuchsia’s external code policies or how these
+policies relate to the Fuchsia project, email [external-code@fuchsia.dev](https://groups.google.com/a/fuchsia.dev/g/external-code).
diff --git a/docs/contribute/governance/policy/osrb-process.md b/docs/contribute/governance/policy/osrb-process.md
new file mode 100644
index 0000000..2084387
--- /dev/null
+++ b/docs/contribute/governance/policy/osrb-process.md
@@ -0,0 +1,55 @@
+# Open Source Review Board (OSRB) process
+
+This document details the processes for adding external code to the
+[Fuchsia Platform Source Tree](https://fuchsia.googlesource.com/).
+For more information on the definition of external code,
+see [Fuchsia Open Source Licensing Policies](/docs/contribute/governance/policy/open-source-licensing-policies.md).
+
+## Overview
+
+Any external code that is hosted within the [Fuchsia Platform Source Tree](https://fuchsia.googlesource.com/)
+must be compliant with the [Fuchsia Open Source Licensing Policies](/docs/contribute/governance/policy/open-source-licensing-policies.md)
+at all times.
+
+## Process for adding external code to new repositories
+
+To host external code within a new repository that does not exist yet, or does
+not yet contain any code, submit an issue in Monorail using the
+[Open Source Review Board (OSRB) template](https://bugs.fuchsia.dev/p/fuchsia/issues/entry?template=Open+Source+Review+Board+).
+
+This issue lets Fuchsia’s OSRB review the code that you want to add, to ensure
+that it complies with the [Fuchsia Open Source Licensing Policies](/docs/contribute/governance/policy/open-source-licensing-policies.md).
+
+When the request is approved, a member of the OSRB communicates the next steps
+to the requester.
+
+## Process for adding external code to repositories with existing external code
+
+To add external code to an existing Fuchsia repository, create an issue in
+Monorail using the [Open Source Review Board (OSRB) template](https://bugs.fuchsia.dev/p/fuchsia/issues/entry?template=Open+Source+Review+Board+).
+
+This Monorail issue lets Fuchsia’s OSRB review the code that you want to add
+and ensure that it complies with the
+[Fuchsia Open Source Licensing Policies](/docs/contribute/governance/policy/open-source-licensing-policies.md).
+
+When the request is approved, a member of the OSRB communicates the next steps
+to the requester.
+
+## Questions about adding external code
+
+If you are unsure if the external code that you want to add to the
+[Fuchsia Platform Source Tree](https://fuchsia.googlesource.com/) should be in
+a new repository or an existing repository, email [external-code@fuchsia.dev](https://groups.google.com/a/fuchsia.dev/g/external-code).
+In your email, include answers to the following questions:
+
+  * How many files does the code that you’re trying to import contain?
+  * Do you want the code that you’re importing to track upstream?
+
+## Process for modifying the stated Name, URL, License, or Usage of existing repositories
+
+To modify the Name, URL, License, or Usage of an existing Fuchsia repository,
+create an issue in Monorail using the [Open Source Review Board (OSRB) template](https://bugs.fuchsia.dev/p/fuchsia/issues/entry?template=Open+Source+Review+Board+).
+
+When the request is approved, a member of the OSRB communicates the next steps
+to the requester.
+
diff --git a/docs/contribute/best-practices/update_channel_usage_policy.md b/docs/contribute/governance/policy/update_channel_usage_policy.md
similarity index 100%
rename from docs/contribute/best-practices/update_channel_usage_policy.md
rename to docs/contribute/governance/policy/update_channel_usage_policy.md
diff --git a/docs/contribute/governance/rfcs/0001_rfc_process.md b/docs/contribute/governance/rfcs/0001_rfc_process.md
index ac4c9e5..79ec732 100644
--- a/docs/contribute/governance/rfcs/0001_rfc_process.md
+++ b/docs/contribute/governance/rfcs/0001_rfc_process.md
@@ -1,15 +1,7 @@
-# Fuchsia Request for Comments (RFC) process (RFC-0001)
-
-A process for making project-wide, technical decisions.
-
-Field     | Value
-----------|--------------------------
-Status    | Accepted
-Authors   | abarth@google.com
-Submitted | 2020-02-20
-Reviewed  | 2020-02-27
-
-[TOC]
+{% set rfcid = "RFC-0001" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
 
 ## Summary
 
@@ -243,7 +235,10 @@
 
 The decision whether to accept an RFC is made by the Eng Council, acting in
 [rough consensus](https://en.wikipedia.org/wiki/Rough_consensus) with each
-other. If the Eng Council cannot reach rough consensus, the RFC is not accepted.
+other. If the decision involves an RFC that has Eng Council members as authors,
+those members must recuse themselves from the decision.
+
+If the Eng Council cannot reach rough consensus, the RFC is not accepted.
 In deciding whether to accept an RFC, the Eng Council will consider the
 following factors:
 
@@ -309,3 +304,4 @@
    similar process [to make decisions about the FIDL
    language](/docs/contribute/governance/fidl/README.md#process). This
    proposal exists because of the success of that decision-making process.
+
diff --git a/docs/contribute/governance/rfcs/0002_platform_versioning.md b/docs/contribute/governance/rfcs/0002_platform_versioning.md
index d903ad1..165b359 100644
--- a/docs/contribute/governance/rfcs/0002_platform_versioning.md
+++ b/docs/contribute/governance/rfcs/0002_platform_versioning.md
@@ -1,16 +1,7 @@
-# Fuchsia Platform Versioning (RFC-0002)
-
-Uses versioning to let the platform evolve while offering compatibility.
-
-Field     | Value
-----------|--------------------------
-Status    | Accepted
-Author(s) | abarth@google.com
-Submitted | 2020-03-30
-Reviewed  | 2020-04-23
-
-
-[TOC]
+{% set rfcid = "RFC-0002" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
 
 ## Summary
 
diff --git a/docs/contribute/governance/rfcs/0003_logging.md b/docs/contribute/governance/rfcs/0003_logging.md
index 7da57fe..efce956 100644
--- a/docs/contribute/governance/rfcs/0003_logging.md
+++ b/docs/contribute/governance/rfcs/0003_logging.md
@@ -1,16 +1,7 @@
-# Fuchsia Logging Guidelines (RFC-0003)
-
-Best practices for using log severities. Applications of log severities in
-testing and in-field metrics.
-
-Field     | Value
---------- | -------------------------------------
-Status    | Accepted
-Author(s) | fsamuel@google.com, shayba@google.com
-Submitted | 2020-06-03
-Reviewed  | 2020-06-10
-
-[TOC]
+{% set rfcid = "RFC-0003" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
 
 ## Summary
 
@@ -150,7 +141,7 @@
 observed within the test realm.
 
 The test runtime behavior described above has been implemented, tested, and
-[documented](/docs/concepts/testing/test_component.md#restricting_log_severity).
+[documented](/docs/concepts/testing/v1_test_component.md#restricting_log_severity).
 
 ## Security Consideration
 
diff --git a/docs/contribute/governance/rfcs/0004_units_of_bytes.md b/docs/contribute/governance/rfcs/0004_units_of_bytes.md
index 24d042d..cfa420f 100644
--- a/docs/contribute/governance/rfcs/0004_units_of_bytes.md
+++ b/docs/contribute/governance/rfcs/0004_units_of_bytes.md
@@ -1,13 +1,7 @@
-# Units of Bytes (RFC-0004)
-
-Field     | Value
-----------|--------------------------
-Status    | Step 2: Draft
-Author(s) | dschuyler@google.com, abbogart@google.com
-Reviewed  |
-Issue     | *link to bugs.fuchsia.dev issue*
-
-[TOC]
+{% set rfcid = "RFC-0004" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
 
 ## Summary
 
diff --git a/docs/contribute/governance/rfcs/0005_blobfs_snapshots.md b/docs/contribute/governance/rfcs/0005_blobfs_snapshots.md
index 04b8258..30ec095 100644
--- a/docs/contribute/governance/rfcs/0005_blobfs_snapshots.md
+++ b/docs/contribute/governance/rfcs/0005_blobfs_snapshots.md
@@ -1,16 +1,7 @@
-# Blobfs Snapshots
-
-Support for Blobfs snapshots during upgrades.
-
-Field     | Value
---------- | --------------------------------------
-Status    | Accepted
-Author(s) | csuter@google.com, jfsulliv@google.com
-Submitted | 2020-09-07
-Reviewed  | 2020-09-19
-Issue     | [fxbug.dev/59567](http://fxbug.dev/59567)
-
-[TOC]
+{% set rfcid = "RFC-0005" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
 
 ## Summary
 
diff --git a/docs/contribute/governance/rfcs/0006_addendum_to_rfc_process_for_zircon.md b/docs/contribute/governance/rfcs/0006_addendum_to_rfc_process_for_zircon.md
index 963990c3..af91951 100644
--- a/docs/contribute/governance/rfcs/0006_addendum_to_rfc_process_for_zircon.md
+++ b/docs/contribute/governance/rfcs/0006_addendum_to_rfc_process_for_zircon.md
@@ -1,16 +1,7 @@
-# Addendum of the RFC Process for Zircon (RFC-0006)
-
-Special considerations when using the Fuchsia RFC process for Zircon.
-
-Field     | Value
-----------|--------------------------
-Status    | Accepted
-Author(s) | cpu@google.com
-Submitted | 2020-08-17
-Reviewed  | 2020-09-24
-
-
-[TOC]
+{% set rfcid = "RFC-0006" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
 
 ## Summary
 
diff --git a/docs/contribute/governance/rfcs/0007_remove_thread_killing.md b/docs/contribute/governance/rfcs/0007_remove_thread_killing.md
index 474ef29..7c1e9b5 100644
--- a/docs/contribute/governance/rfcs/0007_remove_thread_killing.md
+++ b/docs/contribute/governance/rfcs/0007_remove_thread_killing.md
@@ -1,16 +1,7 @@
-# Zircon Removal of Thread Killing (RFC-0007)
-
-This document discusses the removal of thread killing functionality and the reasoning
-behind that removal.
-
-Field     | Value
-----------|--------------------------
-Status    | Accepted
-Author(s) | dgilhooley@google.com
-Submitted | 2020-09-25
-Reviewed  | 2020-10-06
-
-[TOC]
+{% set rfcid = "RFC-0007" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
 
 ## Summary
 
diff --git a/docs/contribute/governance/rfcs/0008_remove_zx_clock_get_and_adjust.md b/docs/contribute/governance/rfcs/0008_remove_zx_clock_get_and_adjust.md
index 114700f..6b492ca 100644
--- a/docs/contribute/governance/rfcs/0008_remove_zx_clock_get_and_adjust.md
+++ b/docs/contribute/governance/rfcs/0008_remove_zx_clock_get_and_adjust.md
@@ -1,16 +1,7 @@
-# Remove zx_clock_get and zx_clock_adjust (RFC-0008)
-
-Defines the plan to deprecate and then remove the `zx_clock_get` and
-`zx_clock_adjust` syscalls.
-
-Field     | Value
-----------|--------------------------
-Status    | Accepted
-Author(s) | jsankey@, based on earlier work by johngro@
-Submitted | 2020-10-21
-Reviewed  | 2020-10-29
-Issue     | [61987](https://fxbug.dev/61987)
-
+{% set rfcid = "RFC-0008" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
 
 ## Summary
 
@@ -208,4 +199,4 @@
 ## Prior art and references
 
 [kernel_objects/clock](/docs/reference/kernel_objects/clock.md) provides a clear
-overview of the operation of userspace clocks and is recommended reading.
\ No newline at end of file
+overview of the operation of userspace clocks and is recommended reading.
diff --git a/docs/contribute/governance/rfcs/0009_edge_triggered_async_wait.md b/docs/contribute/governance/rfcs/0009_edge_triggered_async_wait.md
index 4ba6c78..225dba5 100644
--- a/docs/contribute/governance/rfcs/0009_edge_triggered_async_wait.md
+++ b/docs/contribute/governance/rfcs/0009_edge_triggered_async_wait.md
@@ -1,14 +1,7 @@
-# Edge triggered async_wait (RFC-0009)
-
-Field     | Value
-----------|--------------------------
-Status    | Accepted
-Author(s) | steveaustin@google.com
-Submitted | 2020-10-22
-Reviewed  | 2020-11-04
-Issue     | [fxbug.dev/62553](http://fxbug.dev/62553) [fxbug.dev/45709](http://fxbug.dev/45709)
-
-[TOC]
+{% set rfcid = "RFC-0009" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
 
 ## Summary
 
diff --git a/docs/contribute/governance/rfcs/0010_channel_iovec.md b/docs/contribute/governance/rfcs/0010_channel_iovec.md
index 5c2783e..2525436 100644
--- a/docs/contribute/governance/rfcs/0010_channel_iovec.md
+++ b/docs/contribute/governance/rfcs/0010_channel_iovec.md
@@ -1,18 +1,7 @@
-# zx_channel_iovec_t support for zx_channel_write and zx_channel_call (RFC-0010)
-
-This RFC introduces a new mode to zx_channel_write and zx_channel_call
-which copies input data from multiple memory regions rather than from a
-single contiguous buffer.
-
-Field     | Value
-----------|--------------------------
-Status    | Accepted
-Author(s) | bprosnitz@google.com
-Submitted | 2020-10-01
-Reviewed  | 2020-11-06
-Issue     | [fxbug.dev/60623](http://fxbug.dev/60623)
-
-[TOC]
+{% set rfcid = "RFC-0010" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
 
 ## Summary
 
@@ -114,8 +103,7 @@
 ### Alignment
 
 There are no alignment restrictions on the bytes specified in a
-`zx_channel_iovec_t`. Each `zx_channel_iovec_t` will be copied in order without
-padding.
+`zx_channel_iovec_t`. Each `zx_channel_iovec_t` will be copied without padding.
 
 ### Limits
 
@@ -139,10 +127,11 @@
 
 ### Kernel
 
-After receiving the `ZX_CHANNEL_WRITE_USE_IOVEC` option, the kernel will
-iterate through each of the `zx_channel_iovec_t` iovec objects:
-- For each `zx_channel_iovec_t`, copy the pointed data in from user space to
-  the message buffer in order.
+After receiving the `ZX_CHANNEL_WRITE_USE_IOVEC` option, the kernel will:
+- Copy the data pointed to by the `zx_channel_iovec_t` objects to the message
+  buffer.  While the copy operations will typically also be performed in order
+  of the `zx_channel_iovec_t` inputs, it is not mandatory. However, the final
+  message must be laid out in the order of the `zx_channel_iovec_t` entries.
 - Write the message to the channel.
 
 ### FIDL
diff --git a/docs/contribute/governance/rfcs/0011_getinfo_kmemstats_extended.md b/docs/contribute/governance/rfcs/0011_getinfo_kmemstats_extended.md
index c422692..d047d15 100644
--- a/docs/contribute/governance/rfcs/0011_getinfo_kmemstats_extended.md
+++ b/docs/contribute/governance/rfcs/0011_getinfo_kmemstats_extended.md
@@ -1,15 +1,7 @@
-# zx_object_get_info ZX_INFO_KMEM_STATS_EXTENDED (RFC-0011)
-
-New `ZX_INFO_KMEM_STATS_EXTENDED` topic for the `zx_object_get_info()` syscall.
-
-Field     | Value
-----------|--------------------------
-Status    | Accepted
-Author(s) | rashaeqbal@google.com
-Submitted | 2020-11-04
-Reviewed  | 2020-11-20
-
-[TOC]
+{% set rfcid = "RFC-0011" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
 
 ## Summary
 
diff --git a/docs/contribute/governance/rfcs/0012_zircon_discardable_memory.md b/docs/contribute/governance/rfcs/0012_zircon_discardable_memory.md
new file mode 100644
index 0000000..ff2194c
--- /dev/null
+++ b/docs/contribute/governance/rfcs/0012_zircon_discardable_memory.md
@@ -0,0 +1,514 @@
+{% set rfcid = "RFC-0012" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
+
+## Summary
+
+This RFC describes a mechanism for userspace applications to indicate to
+the kernel that certain memory buffers are eligible for reclamation. The kernel
+is then free to discard these buffers when the system is running low on
+available memory.
+
+## Motivation
+
+Managing free memory is a complex problem in an overcommit system like Zircon,
+where user applications are allowed to allocate more memory than might currently
+be available on the system.  This is accomplished by using Virtual Memory
+Objects (VMOs) that are lazily backed by physical pages as portions within them
+are committed.
+
+Overestimating the amount of physical memory that will be in use at any point in
+time, and failing further memory allocation requests based on that, can leave
+free memory on the table. This can affect performance, as a lot of this memory
+is used by applications for caching purposes. On the other hand, underestimating
+the amount of memory in use can cause the system to quickly use up all of the
+available memory, leading to an out-of-memory (OOM) scenario.
+Furthermore, the definition of “free” memory itself is complex.
+
+The Zircon kernel monitors the amount of free physical memory and generates
+memory pressure signals at various levels. The purpose of these signals is to
+allow userspace applications to scale back (or grow) their memory footprint
+based on system-wide free memory levels. While this helps keep the system from
+running out of memory, the decoupling of the initiator of these signals (the
+kernel) from the responder (user applications) is not ideal. Processes that
+respond to memory pressure do not have enough context around how much memory
+they should be freeing; the kernel has a better picture of global memory usage
+on the system, and it can also take into consideration other forms of
+reclaimable memory, e.g. user pager backed memory that can be evicted.
+
+This RFC proposes a mechanism by which the kernel can directly reclaim
+userspace memory buffers under memory pressure. There are a few
+advantages to this approach:
+- It allows for greater control over how much memory is evicted; the kernel can
+  look at free memory levels and evict only as much memory as required.
+- The kernel can use an LRU scheme to discard memory, which might work better at
+  accommodating the current working set in memory.
+- Userspace can sometimes be slow to drop memory in response to memory pressure
+  signals. In some cases, it might be too late for the system to recover.
+- Userspace clients waking up to respond to memory pressure can sometimes
+  require more memory.
+
+## Design
+
+### Overview
+
+The discardable memory protocol would roughly work as follows:
+
+1. A userspace process creates a VMO and marks it as *discardable*.
+2. Before accessing the VMO either directly (`zx_vmo_read`/`zx_vmo_write`), or
+   through a mapping in its address space (`zx_vmar_map`), the process *locks*
+   the VMO indicating that it is in use.
+3. The process *unlocks* the VMO when done, indicating that it is no longer
+   needed. The kernel will consider all unlocked discardable VMOs as eligible
+   for reclamation, and will be free to discard them under memory pressure.
+4. When the process needs to access the VMO again, it will try to lock it. This
+   lock can now succeed in one of two ways.
+    - The lock can succeed with the pages of the VMO still intact, i.e. the
+      kernel has not discarded it yet.
+    - If the kernel has discarded the VMO, the lock will succeed whilst also
+      indicating to the client that its pages have been discarded, so that
+      they can reinitialize it or take other necessary actions.
+5. The process will unlock the VMO again when done. Locking and unlocking can
+   repeat in this fashion as often as required.
+
+Note that discardable memory is not meant as a direct replacement for memory
+pressure signals.  Watching for memory pressure changes is still valuable for
+other component-level decisions, like choosing when to launch memory intensive
+activities or threads. In the future, we could also use these signals to kill
+idle processes within a component. Memory pressure signals also provide
+components greater control over exactly what memory to free and when.
+
+### Discardable Memory API
+
+We can extend the existing `zx_vmo_create()` and `zx_vmo_op_range()` syscalls to
+support this feature.
+
+- `zx_vmo_create()` will be extended to support a new `options` flag -
+  `ZX_VMO_DISCARDABLE`. This flag can be used in combination with
+  `ZX_VMO_RESIZABLE`. However, the general advice about resizable VMOs also
+  applies to discardable VMOs - sharing resizable VMOs between processes can be
+  dangerous and should be avoided.
+
+- `zx_vmo_op_range()` will be extended to support new operations to provide
+  locking and unlocking - `ZX_VMO_OP_LOCK`, `ZX_VMO_OP_TRY_LOCK`, and
+  `ZX_VMO_OP_UNLOCK`.
+
+- Locking and unlocking will apply to the entire VMO, so `offset` and `size`
+  should span the whole range of the VMO. It is an error to lock and unlock a
+  smaller range within the VMO. While the current implementation does not
+  strictly require an `offset` and `size`, ensuring that only the entire range
+  of the VMO is considered valid allows for adding sub-range support in the
+  future without changing the behavior for clients.
+
+- The `ZX_VMO_OP_TRY_LOCK` operation will attempt to lock the VMO and can fail.
+  It will succeed if the kernel has not discarded the VMO, and fail with
+  `ZX_ERR_NOT_AVAILABLE` if the kernel has discarded it. In case of failure, the
+  client is expected to try again with `ZX_VMO_OP_LOCK` which is guaranteed to
+  succeed as long as the arguments passed in are valid. The `ZX_VMO_OP_TRY_LOCK`
+  operation is provided as a lightweight option to try locking the VMO without
+  having to set up the buffer argument. Clients can also choose to not take any
+  action following failure to lock the VMO.
+
+- The `ZX_VMO_OP_LOCK` operation will also require the `buffer` argument, an out
+  pointer to a `zx_vmo_lock_state` struct. This struct is meant for the kernel
+  to pass back information that the client might find useful, and consists of:
+  - `offset` and `size` tracking the locked range: These are the `size` and
+    `offset` arguments that were passed in by the client. These are returned
+    purely for convenience, so that the client does not need to keep track of
+    ranges separately, and instead can directly use the returned struct. If the
+    call succeeds, they will always be the same as the `size` and `offset`
+    values passed into the `zx_vmo_op_range()` call.
+  - `discarded_offset` and `discarded_size` tracking the discarded range: This
+    is the maximal range within the locked range that contains discarded pages.
+    Not all pages within this range might have been discarded - it is simply a
+    union of all the discarded sub-ranges within this range, and can contain
+    pages that have not been discarded as well. With the current API, the
+    discarded range will span the entire VMO if the kernel has discarded it. If
+    undiscarded, both `discarded_offset` and `discarded_size` will be set to
+    zero.
+
+- Locking itself does not commit any pages in the VMO. It just marks the state
+  of the VMO as “undiscardable” by the kernel. The client can commit pages in
+  the VMO using any of the existing methods that apply to regular VMOs, e.g.
+  `zx_vmo_write()`, `ZX_VMO_OP_COMMIT`, mapping the VMO and directly writing to
+  mapped addresses.
+
+```
+// |options| supports a new flag - ZX_VMO_DISCARDABLE.
+zx_status_t zx_vmo_create(uint64_t size, uint32_t options, zx_handle_t* out);
+
+// |op| is ZX_VMO_OP_LOCK, ZX_VMO_OP_TRY_LOCK, and ZX_VMO_OP_UNLOCK to
+// respectively lock, try lock and unlock a discardable VMO.
+// |offset| must be 0 and |size| must be the size of the VMO.
+//
+// ZX_VMO_OP_LOCK requires |buffer| to point to a |zx_vmo_lock_state| struct,
+// and |buffer_size| to be the size of the struct.
+//
+// Returns ZX_ERR_NOT_SUPPORTED if the vmo has not been created with the
+// ZX_VMO_DISCARDABLE flag.
+zx_status_t zx_vmo_op_range(zx_handle_t handle,
+                            uint32_t op,
+                            uint64_t offset,
+                            uint64_t size,
+                            void* buffer,
+                            size_t buffer_size);
+
+// |buffer| for ZX_VMO_OP_LOCK is a pointer to struct |zx_vmo_lock_state|.
+typedef struct zx_vmo_lock_state {
+  // The |offset| that was passed in.
+  uint64_t offset;
+  // The |size| that was passed in.
+  uint64_t size;
+  // Start of the discarded range. Will be 0 if undiscarded.
+  uint64_t discarded_offset;
+  // The size of discarded range. Will be 0 if undiscarded.
+  uint64_t discarded_size;
+} zx_vmo_lock_state_t;
+
+```
+
+The `zx::vmo` interface will be extended to support the `ZX_VMO_OP_LOCK`,
+`ZX_VMO_OP_TRY_LOCK` and `ZX_VMO_OP_UNLOCK` ops with `op_range()`. Rust, Go and
+Dart bindings will be updated as well.
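+
+To make the call sequence concrete, the following is a minimal sketch of how a
+client might use the proposed flag and operations. It assumes the declarations
+shown above; the function name and the elided (re)initialization logic are
+illustrative, not part of the proposal.
+
+```
+#include <zircon/syscalls.h>
+#include <zircon/types.h>
+
+zx_status_t use_discardable_buffer(uint64_t size) {
+  zx_handle_t vmo;
+  // Create a discardable VMO. No pages are committed yet.
+  zx_status_t status = zx_vmo_create(size, ZX_VMO_DISCARDABLE, &vmo);
+  if (status != ZX_OK) {
+    return status;
+  }
+
+  // Lock the entire VMO before touching it. ZX_VMO_OP_LOCK succeeds for valid
+  // arguments and reports whether the kernel discarded the contents.
+  zx_vmo_lock_state_t lock_state;
+  status = zx_vmo_op_range(vmo, ZX_VMO_OP_LOCK, 0, size, &lock_state,
+                           sizeof(lock_state));
+  if (status != ZX_OK) {
+    zx_handle_close(vmo);
+    return status;
+  }
+  if (lock_state.discarded_size > 0) {
+    // The kernel discarded the buffer while it was unlocked; repopulate it
+    // here before use (client-specific, omitted).
+  }
+
+  // ... read from or write to the VMO while it is locked ...
+
+  // Unlock when done. The VMO is now eligible for reclamation again.
+  status = zx_vmo_op_range(vmo, ZX_VMO_OP_UNLOCK, 0, size, NULL, 0);
+  zx_handle_close(vmo);
+  return status;
+}
+```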
+
+This API provides clients with the flexibility to share the discardable VMO
+across multiple processes. Each process that needs to access the VMO can do so
+independently, locking and unlocking the VMO as required. There is no careful
+coordination required amongst processes based on assumptions about the locked
+state. The kernel will only consider a VMO eligible for reclamation when no one
+has it locked.
+
+#### Restrictions on VMOs
+
+- The discardable memory API is supported only for `VmObjectPaged` types, as
+  `VmObjectPhysical` cannot be discarded by definition.
+
+- The API is not compatible with VMO clones (both snapshots and COW clones) and
+  slices, since discarding VMOs in a clone hierarchy can lead to surprising
+  behaviors. The `zx_vmo_create_child()` syscall will fail on discardable VMOs.
+
+- When mapping discardable VMOs, `zx_vmar_map()` will require the
+  `ZX_VM_ALLOW_FAULTS` flag. This forces the client to explicitly acknowledge
+  that they’re prepared to handle faults if the VMO is discarded (see the
+  sketch after this list).
+
+- The `ZX_VMO_DISCARDABLE` flag cannot be used in the `options` argument for
+  `zx_pager_create_vmo()`. A major reason for this is that pager-backed VMOs can
+  be cloned, and discardable VMOs cannot.  Moreover, discardability is implied
+  for pager-backed VMOs, so an additional flag is not required.
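+
+The mapping restriction mentioned in the list above could look roughly like the
+following sketch (`map_discardable` is an illustrative helper, not part of the
+proposed API):
+
+```
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+
+// Maps a discardable VMO read/write. ZX_VM_ALLOW_FAULTS acknowledges that
+// touching the mapping while the VMO is unlocked and discarded may fault.
+zx_status_t map_discardable(zx_handle_t vmo, uint64_t size,
+                            zx_vaddr_t* out_addr) {
+  return zx_vmar_map(zx_vmar_root_self(),
+                     ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_ALLOW_FAULTS,
+                     0, vmo, 0, size, out_addr);
+}
+```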
+
+#### Interaction with existing VMO operations
+
+The semantics of existing VMO operations will remain the same as before. For
+example, `zx_vmo_read()` will not verify that a discardable VMO is locked before
+permitting the operation. It is the client’s responsibility to ensure that they
+have the VMO locked when they are accessing it, to ensure that the kernel does
+not discard it from under them. This limits the surface area of this change. The
+only guarantee the kernel provides is that it won’t discard a VMO’s pages while
+it is locked.
+
+Any mappings for the VMO will continue to be valid even if the VMO is discarded,
+as long as the client locks the VMO before accessing the mappings. Clients do
+not need to recreate mappings if the VMO has been discarded.
+
+After the kernel has discarded a VMO, any further operations on it without first
+locking it will fail as if the VMO had no committed pages, and there exists no
+mechanism to commit pages on demand.  For example, a `zx_vmo_read()` will fail
+with `ZX_ERR_OUT_OF_RANGE`. If the VMO was mapped in a process’ address space,
+unlocked accesses to mapped addresses will result in fatal page fault
+exceptions.
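+
+For example, a client that accesses a discardable VMO without locking it could
+observe the failure mode described above. The following check is only an
+illustration of the error value; it is inherently racy, and real clients should
+lock the VMO and inspect `discarded_size` instead.
+
+```
+#include <stdbool.h>
+#include <stdint.h>
+#include <zircon/syscalls.h>
+
+// Returns true if an unlocked discardable VMO appears to have been discarded,
+// based on zx_vmo_read() failing as if no pages were committed.
+bool appears_discarded(zx_handle_t vmo) {
+  uint8_t byte;
+  return zx_vmo_read(vmo, &byte, 0, 1) == ZX_ERR_OUT_OF_RANGE;
+}
+```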
+
+### Kernel Implementation
+
+#### Tracking metadata
+
+- The `options_` bitmask in `VmObjectPaged` will be extended to support a
+  `kDiscardable` flag; we’re currently only using 4 bits out of 32.
+- A new `lock_count` field will be added to `VmObjectPaged`, which will track
+  the number of outstanding lock operations on the VMO.
+- The kernel will maintain a global list of *reclaimable* VMOs, i.e. all
+  unlocked discardable VMOs on the system. The list will be updated as follows:
+    - A `ZX_VMO_OP_LOCK` will increment the VMO’s `lock_count`. If `lock_count`
+      goes from 0->1, the VMO will be removed from the global reclaimable list.
+    - A `ZX_VMO_OP_UNLOCK` will decrement the VMO’s `lock_count`. If
+      `lock_count` drops to 0, the VMO will be added to the global reclaimable
+      list.
+
+#### Reclamation logic
+
+Discardable VMOs are added to the global reclaimable list when their
+`lock_count` drops to zero, and are removed when locked again. This maintains an
+LRU order of all unlocked discardable VMOs on the system. When under memory
+pressure, the kernel can dequeue VMOs from this list in order and discard them,
+checking the free memory level after each. This is a very simplistic version of
+what the reclamation logic might look like in practice. A few more things to
+consider are mentioned later.
+
+#### Discard operation
+
+A “discard” is implemented on the kernel side as resizing the VMO to zero, and
+the old size is restored on a subsequent `ZX_VMO_OP_LOCK`. This gives us the
+ability to generate exceptions when a discarded unlocked VMO is accessed through
+a mapping, and also the ability to fail syscalls. Note that even though the
+internal view the kernel has indicates that the VMO has a size of zero, the
+client will always see the size of the VMO as the size it was created with, or
+explicitly set to with `zx_vmo_set_size()`. Resizing to zero is purely an
+implementation detail in the kernel, chosen for convenience, based on the
+desired behavior for failure cases.
+
+## Implementation
+
+This is a new API so there are no dependencies at this stage. The kernel-side
+implementation can be done in isolation. Once the API has been implemented,
+userspace clients can start adopting it.
+
+## Performance
+
+The performance implications will vary based on the client-side use cases. There
+are a few things clients can keep in mind when using the API.
+
+- The `zx_vmo_op_range()` syscalls to lock and unlock discardable VMOs before
+  access can add noticeable latency on performance critical paths. So the
+  syscalls should be used on code paths where an increased latency can be
+  tolerated or hidden.
+- Clients could also see a boost in performance, due to caches being held in
+  memory for longer periods. Buffers that previously had to be dropped by
+  clients under memory pressure can now be held for longer, as the kernel will
+  only discard as much memory as required. Clients can track this change with
+  cache hit rates, the number of times buffers need to be re-initialized, and
+  so on.
+
+## Security considerations
+
+None.
+
+## Privacy considerations
+
+None.
+
+## Testing
+
+- Core tests / unit tests that exercise the new API from multiple threads.
+- Unit tests that verify the reclamation behavior on the kernel side, i.e. only
+  unlocked VMOs can be discarded.
+
+## Documentation
+
+The Zircon syscall documentation will need to be updated to include the new API.
+
+## Drawbacks, alternatives, and unknowns
+
+### Locking ranges within a VMO
+
+The granularity of reclamation is chosen as the entire VMO, instead of
+supporting finer-grained discard operations of ranges within a VMO. There are a
+few reasons behind this.
+- Reconstructing a VMO which has some pages discarded can be tricky. Considering
+  the generic use case, where a VMO is used to represent an anonymous memory
+  buffer, repopulating discarded pages would likely be zero fills, which might
+  not always make sense w.r.t. the remaining pages that were left undiscarded.
+  It might also not be valuable to hold on to only a subset of the VMO’s pages,
+  i.e. the VMO is meaningful only when it is fully populated.
+- VMO granularity keeps the `VmObjectPaged` implementation simple, requiring
+  minimal tracking metadata. We don’t need to track locked ranges to later match
+  with unlocks. There is no complicated range merging involved either.
+- It also keeps the reclamation logic fairly lightweight, allowing for large
+  chunks of memory to be freed at once. Supporting page granularity instead
+  would likely require maintaining page queues, and aging discardable pages,
+  similar to the mechanism we use to evict user pager backed pages.
+
+The proposed API does leave the door open to indicate reclaimable ranges in the
+future if required, with the `offset` and `size` arguments in
+`zx_vmo_op_range()` that are currently unused. Adding range support to the
+locking API (page granularity locking) seems like a natural extension to the
+current proposal. This will benefit clients where the cost of backing small
+discardable regions with individual VMOs can be prohibitive.
+
+### Kernel implementation of discard
+
+When the kernel reclaims a discardable VMO, it essentially resizes it to zero.
+The other alternative here would be to leave the size of the VMO unaltered, and
+instead simply decommit its pages.  Resizing the VMO allows for a stricter
+failure model. For example, consider the case where a client had a discardable
+VMO mapped in its address space, which the kernel discarded at some point. If
+the client now tries to access the VMO via the mapping without first locking the
+VMO, it will incur a fatal page fault. Whereas if the kernel were to simply
+decommit pages, a subsequent unlocked access would result in a zero page being
+silently handed to the client. This could either go undetected, or result in
+more subtle errors due to unexpected zero pages.
+
+We could also model the same behavior in the kernel via another mechanism which
+tracks the discarded state of the VMO, but the logic would likely look very
+similar to a resize to zero. Implementing a discard as a resize allows us to use
+existing logic that gives us the desired behavior for free.
+
+### Faster locking API with atomics
+
+This locking optimization provides an alternate low-latency option to lock and
+unlock discardable VMOs, and is meant to be used by clients that expect to lock
+and unlock fairly frequently. It is purely a performance optimization, and as
+such can be a feature we add in the future if required.
+
+The API uses a locking primitive called a metex, which is similar to a Zircon
+futex in that it allows fast locking via userspace atomics, thereby saving the
+cost of a syscall.
+
+A discardable VMO can be associated with a metex, which will be used to lock and
+unlock it, instead of the `zx_vmo_op_range()` syscall.  A metex can have three
+states: locked (in use by the userspace client), discardable (eligible for
+reclamation by the kernel), and “needs syscall” (might have been reclaimed by
+the kernel, a syscall is required to check the state).  Locking and unlocking
+the VMO can be performed without entering the kernel by atomically flipping the
+state of the metex between locked and discardable.  When the kernel discards the
+VMO, it will atomically flip its state to "needs syscall", indicating that the
+client needs to synchronize with the kernel to check on the discarded state.
+More details of this proposal are out of the scope of this RFC, and will be
+provided in a separate one.
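+
+The following is a purely illustrative sketch of what the userspace fast path
+of such a mechanism might look like, using C11 atomics. None of these names
+exist in the Zircon API today; the actual design is deferred to that separate
+proposal.
+
+```
+#include <stdatomic.h>
+#include <stdbool.h>
+
+// Hypothetical metex states, mirroring the description above.
+enum { METEX_LOCKED = 0, METEX_DISCARDABLE = 1, METEX_NEEDS_SYSCALL = 2 };
+
+bool metex_try_lock(atomic_int* metex) {
+  int expected = METEX_DISCARDABLE;
+  // Fast path: flip discardable -> locked without entering the kernel.
+  if (atomic_compare_exchange_strong(metex, &expected, METEX_LOCKED)) {
+    return true;  // Contents are intact and the VMO is now locked.
+  }
+  // The state is "needs syscall": the kernel may have discarded the VMO, so
+  // a (hypothetical) syscall is needed to re-lock and learn what was lost.
+  return false;
+}
+
+void metex_unlock(atomic_int* metex) {
+  // Flip locked -> discardable, making the VMO eligible for reclamation.
+  atomic_store(metex, METEX_DISCARDABLE);
+}
+```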
+
+### Pager based creation API
+
+Any VMO that is backed by a pager is essentially a discardable VMO, because the
+pager provides a mechanism to repopulate discarded pages on demand. The type of
+discardable memory being proposed in this RFC is anonymous discardable
+memory; the other type is file-backed discardable memory, an example of which is
+the in-memory representation of blobs populated by the blobfs user pager.
+Keeping this in mind, we can consider an alternate creation API where
+discardable VMOs are associated with a pager. The VMO creation call might look
+something like this:
+
+```
+zx_pager_create(0, &pager_handle);
+
+zx_pager_create_vmo(pager_handle, 0, pager_port_handle, vmo_key, vmo_size,
+                    &vmo_handle);
+```
+
+Locking and unlocking would work as proposed earlier with `zx_vmo_op_range()`.
+The kernel would be free to discard pages from a VMO only when unlocked.
+
+The advantage here is that it provides us with a unified creation API applicable
+to all kinds of discardable memory - irrespective of whether it is file-backed
+or anonymous.
+
+However, the pager in this case does not really serve a special purpose. Since
+it deals with generic anonymous memory, it is likely only going to provide zero
+pages on demand. A pager is more suited for cases where pages need to be
+populated in a specialized manner with certain specific content.  Introducing an
+additional layer of indirection, both in terms of technical complexity and
+performance overhead, just for the purpose of creating zero pages on demand
+seems unnecessary; this functionality already exists in the kernel for regular
+(non pager-backed) VMOs.
+
+### Locking with a retainer object
+
+The locking API proposed here leaves room for bugs where a discardable VMO can
+be unintentionally (or maliciously) unlocked. We could have situations where a
+process thinks that a VMO is locked, but another process has unlocked it, i.e.
+the second process issues an extra unlock. This would cause the first process to
+error out or crash when it accesses the VMO, even though it did correctly lock
+it before access.
+
+Instead of lock and unlock operations, we could implement locking with a
+retainer object, which would lock the VMO when created and unlock it when
+destroyed.
+
+```
+zx_vmo_create_retainer(vmo_handle, &retainer_handle);
+```
+
+The VMO would remain locked as long as the retainer handle is open. In the
+example above, each of the two processes would use their own retainers to lock
+the VMO, removing the possibility of an erroneous extra unlock. This locking
+model reduces the likelihood of such bugs, and makes them easy to diagnose when
+they occur.
+
+The downside here is that the kernel will need to store more metadata to track
+the locked state of a VMO. We now have a list of retainer objects associated
+with a discardable VMO, instead of a single `lock_count` field. We might also
+want to cap the length of this list if we want to eliminate the possibility of a
+malicious user causing unbounded growth in the kernel.
+
+### Priorities for reclamation order
+
+To keep things simple to begin with, the kernel will reclaim unlocked
+discardable VMOs in LRU order.  We could explore having clients explicitly
+specify a priority order of reclamation in the future if required (VMOs in each
+priority band could still be reclaimed in LRU order). The proposed API leaves
+the door open to support this in the future, via the currently unused `buffer`
+parameter in `zx_vmo_op_range()` for `ZX_VMO_OP_UNLOCK`.
+
+This level of control is something we might not require though; a global LRU
+order might be sufficient. If clients did want to exercise more control over
+when certain buffers are reclaimed, they could instead opt into memory pressure
+signals, and drop those buffers themselves.
+
+### Interaction with other reclamation strategies
+
+Currently there are two other mechanisms by which we can reclaim memory:
+- Page eviction of user pager backed memory (in-memory blobs), which is done by
+  the kernel at the CRITICAL memory pressure level (and near OOM).
+- Memory pressure signals, where userspace components themselves free memory at
+  CRITICAL and WARNING memory pressure levels.
+
+We will need to figure out where discardable memory sits in this scheme,
+ensuring that no single reclamation strategy takes the majority of the burden.
+For example, we might want to maintain some kind of eviction ratio of
+file-backed memory to discardable memory.
+
+### Locking pager-backed VMOs
+
+We could extend the `ZX_VMO_OP_LOCK` and `ZX_VMO_OP_UNLOCK` operations to
+pager-backed VMOs in the future. There has been a desire to support locking of
+user pager backed VMOs in the past, which we might want to provide if a concrete
+use case arises. For example, blobfs could lock VMOs in memory for blobs that it
+deems important, or that do not fit the kernel LRU eviction scheme too well,
+thereby avoiding the performance cost of re-paging them.
+
+Locking pager-backed VMOs would tie in nicely with the discardable memory API,
+since user pager backed VMOs can essentially be viewed as a type of discardable
+memory, where the user pager provides a specialized mechanism to repopulate
+pages. Locking and unlocking would then apply to both types of discardable
+memory, the major difference between the two types being the way they are
+created and populated.
+
+### Deciding when to repopulate discarded VMOs
+
+Clients might need a way to figure out when it is safe to repopulate a discarded
+VMO. If the VMO is repopulated under memory pressure, the additional pages
+committed might worsen the memory pressure on the system, pushing it closer to
+OOM. Also, once the VMO is subsequently unlocked, there is a chance it might get
+discarded if the memory pressure persists. This can lead to thrashing, where the
+client repeatedly repopulates the VMO, only to see the kernel discard it soon
+after.
+
+Currently the only mechanism to observe system memory pressure levels is by
+subscribing to the `fuchsia.memorypressure` service, which can be pretty
+expensive for this use case. We could consider extending this service to provide
+a way to perform one-off queries. We could also consider passing back an
+indicator of the pressure level through the `zx_vmo_lock_state` struct - either
+the current memory pressure level itself, or a boolean that coarsely captures
+whether the system is under memory pressure.
+
+### Debug aid to track unlocked VMO accesses
+
+It might be useful to enable additional checks behind a build flag that fail
+syscalls on unlocked discardable VMOs. This would help developers easily find
+bugs where a VMO access is not preceded by a lock, without having to rely on
+the VMO actually being discarded under memory pressure before failures surface.
+Such checks on the locked state of a VMO can quickly become expensive as we add
+range support in the future, so they are not feasible to enable in production,
+but they might prove useful as a debug tool.
+
+Catching unlocked VMO accesses through mappings might be trickier to implement.
+A couple of approaches that we could explore to accomplish this:
+ - Unmap a mapped discardable VMO when it is unlocked. With this approach, we
+   would need to make sure that existing VMO / VMAR semantics remain unchanged.
+ - Teach wrappers around lock / unlock calls to tell ASAN that an unlocked VMO's
+   mapping should be considered poisoned until it is locked again, using the
+   [`ASAN_POISON_MEMORY_REGION`](https://github.com/llvm-mirror/compiler-rt/blob/master/include/sanitizer/asan_interface.h)
+   interface (a sketch of this approach follows this list).
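+
+The sketch below illustrates the second approach. The wrapper names, the
+mapping bookkeeping, and the `zx_vmo_lock_state_t` spelling are assumptions
+made for illustration; the lock and unlock operations are the ones proposed in
+this RFC, and the poisoning macros come from the ASAN interface linked above.
+
+```
+#include <sanitizer/asan_interface.h>
+#include <zircon/syscalls.h>
+
+// Hypothetical wrapper: unlock the VMO and poison its mapping so that ASAN
+// reports any access made before the next lock.
+void discardable_unlock(zx_handle_t vmo, void* mapping, size_t size) {
+  zx_vmo_op_range(vmo, ZX_VMO_OP_UNLOCK, 0, size, NULL, 0);
+  ASAN_POISON_MEMORY_REGION(mapping, size);
+}
+
+// Hypothetical wrapper: lock the VMO and unpoison its mapping on success.
+zx_status_t discardable_lock(zx_handle_t vmo, void* mapping, size_t size,
+                             zx_vmo_lock_state_t* lock_state) {
+  zx_status_t status = zx_vmo_op_range(vmo, ZX_VMO_OP_LOCK, 0, size,
+                                       lock_state, sizeof(*lock_state));
+  if (status == ZX_OK) {
+    ASAN_UNPOISON_MEMORY_REGION(mapping, size);
+  }
+  return status;
+}
+```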
+
+## Prior art and references
+
+- [`ashmem`](https://android.googlesource.com/platform/system/core/+/dd7bc3319deb2b77c5d07a51b7d6cd7e11b5beb0/include/cutils/ashmem.h) on Android
+- [`ReclaimVirtualMemory`](https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-reclaimvirtualmemory) on Windows
+- [`NSCache`](https://developer.apple.com/documentation/foundation/nscache) on macOS
diff --git a/docs/contribute/governance/rfcs/0014_relax_fifo_create_constraints.md b/docs/contribute/governance/rfcs/0014_relax_fifo_create_constraints.md
new file mode 100644
index 0000000..3fb3b86
--- /dev/null
+++ b/docs/contribute/governance/rfcs/0014_relax_fifo_create_constraints.md
@@ -0,0 +1,79 @@
+{% set rfcid = "RFC-0014" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
+
+## Summary
+
+`zx_fifo_create` currently requires the caller to pass an element count that is a power of two.
+This allows the kernel to make a small optimization on the read and write paths by using a
+bit-mask instead of the modulus operator to determine the offset of a wrapped record in a buffer.
+
+This constraint means that users cannot effectively use the FIFO to its full capacity if their
+element sizes do not happen to be `4096/(2**n)` bytes; in these cases the FIFO capacity
+(`element_size * element_count`) will be less than the possible full capacity of the FIFO
+(which internally uses a `4096`-byte buffer).
+
+The proposal is to allow FIFOs to be created with arbitrary element counts (up to the FIFO size
+limit) so the full FIFO capacity can be used for FIFO records of arbitrary sizes.
+
+## Motivation
+
+This proposal is motivated by the desire to add a field to an existing FIFO record used in the
+block stack; the field in question is a trace identifier which will be used to support
+cross-process tracing of block IO. Adding this field changes the size of the FIFO records from
+`32` bytes to `40` bytes.
+
+If FIFO records are `40` bytes long, then the maximum value that `element_count` could currently
+be set to is `64`, which results in `40 * 64 = 2560` bytes used in the `4096`-byte buffer.
+
+This RFC would permit an `element_count` value of `102`, which results in `40 * 102 = 4080` bytes
+used (almost doubling the usable capacity of the FIFO).
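+
+As a minimal sketch (the element size and count are the motivating values
+above, not requirements of the API), the block stack could then create its
+FIFO as:
+
+```
+zx_handle_t fifo0, fifo1;
+zx_status_t status = zx_fifo_create(/*elem_count=*/102, /*elem_size=*/40,
+                                    /*options=*/0, &fifo0, &fifo1);
+```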
+
+## Design
+
+This is a trivial change that is implemented in
+<https://fuchsia-review.googlesource.com/c/fuchsia/+/409498>.
+
+## Implementation
+
+This is a trivial change that is implemented in
+<https://fuchsia-review.googlesource.com/c/fuchsia/+/409498>.
+
+## Performance
+
+The kernel can currently make a small performance optimization since it knows the element count
+is a power of two; specifically, the kernel can use a bitwise-AND to determine the position of an
+element in the internal buffer, instead of a modulo (see the example below).
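+
+For illustration (the variable names are arbitrary), the two forms are:
+
+```
+// Works for any element count, but requires an integer division.
+uint64_t wrapped = index % elem_count;
+
+// Equivalent, but only when elem_count is a power of two.
+uint64_t wrapped_fast = index & (elem_count - 1);
+```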
+
+That said, this optimization is negligible compared to the expense of crossing back and forth
+between the kernel and userspace.
+
+Concretely: performance testing in <https://fuchsia-review.googlesource.com/c/fuchsia/+/409498>
+indicates that there are no measurable performance costs for eliminating this optimization.
+
+## Security considerations
+
+None.
+
+## Privacy considerations
+
+None.
+
+## Testing
+
+This is a trivial change for which tests are added in
+<https://fuchsia-review.googlesource.com/c/fuchsia/+/409498>.
+
+## Documentation
+
+The `zx_fifo_create` documentation is adjusted in
+<https://fuchsia-review.googlesource.com/c/fuchsia/+/409498> based on the new relaxed constraints.
+
+## Drawbacks, alternatives, and unknowns
+
+None.
+
+## Prior art and references
+
+None.
diff --git a/docs/contribute/governance/rfcs/0015_cts.md b/docs/contribute/governance/rfcs/0015_cts.md
new file mode 100644
index 0000000..3ca8b7d
--- /dev/null
+++ b/docs/contribute/governance/rfcs/0015_cts.md
@@ -0,0 +1,370 @@
+{% set rfcid = "RFC-0015" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
+
+## Summary
+
+This document presents requirements, design, and implementation strategy for a
+Compatibility Test Suite (CTS) for the Fuchsia platform. A CTS will offer a way
+of testing platform implementations to make sure that they behave according to
+Fuchsia's specifications. Fuchsia developers will write tests that guarantee
+compatibility across changes to both the source and behavior of the platform.
+When these tests pass, they will guarantee that a particular release, when
+run on a particular device, is compatible with a target API level and target ABI
+revision, as defined in [RFC-0002].
+
+For the purposes of this document, we refer to API and ABI jointly as the
+_platform surface area_. A _platform surface area element_ is a named and
+versioned identifier associated with the platform surface area (e.g., a method
+name). Future RFCs may formalize these definitions.
+
+## Motivation
+
+All of the open-source tests for Fuchsia platform behavior are currently
+(December 2020) built and run as part of the platform build. As the platform
+evolves, we keep the tests passing at head. As a result, we have no tests that
+guarantee backwards compatibility with older versions of the platform.
+
+Currently, we use a number of product tests to ensure compatibility and
+stability. These product tests are difficult to triage: because they rely on
+the stability of the product, and target many different parts of the platform,
+it is difficult for platform engineers to determine where the bug may be.
+
+At the same time, developers are writing code that targets older versions of the
+Fuchsia platform surface area. In this document, we refer to such developers as
+_end developers_.
+
+As we roll out API breaking changes, we have no safeguards in place that raise a
+flag when we break compatibility with end developers' code. Over the course of
+the project, undocumented API changes have frequently been released that cause
+external code to stop building.
+
+Furthermore, we are currently building out strong guarantees about backwards ABI
+compatibility. As of 9 November 2020, we require a six-week ABI compatibility
+window, but have no enforcement mechanism for it.
+
+We need a set of tests that we can execute independently from the platform build
+that identify clearly when we break our contracts with end developers. This
+will help ensure that we maintain compatibility with externally developed code,
+and provide more easily triaged, targeted test coverage for the parts of the
+platform that are currently only exercised by product tests.
+
+In the long term, we will also need a set of tests that system integrators can
+execute to know if they are producing a compliant Fuchsia implementation.
+
+Fuchsia's CTS will offer a way of testing platform implementations to make sure
+that they are compatible with particular platform releases (as defined in
+[FTP-0002]). We aspire to have a test for each documented behavior in the
+platform surface area.
+
+When we create a release, we can use the CTS to tell us about the compatibility
+of its surface area with that of other release versions.
+
+When someone is developing a device running Fuchsia and wants to see if it is
+compatible with a given SDK, they can take the CTS and the SDK with which they
+want to demonstrate compatibility, pass the tests, and have confidence that
+their product correctly implements Fuchsia's semantics - it will be "Fuchsia
+compatible".
+
+When a developer wants to understand how to write code against a particular
+API, or using a particular ABI, they will be able to use these tests as a
+reference.
+
+[RFC-0002] allows a platform implementation to provide partial support for
+backwards compatibility. CTS will provide a way to test partial compatibility.
+
+Note that the CTS is not intended as a complete solution for platform evolution
+and backwards compatibility. It's not likely that CTS tests will cover every
+last use case. API and ABI will still have to be designed with future use cases
+in mind. See the section on [drawbacks and
+alternatives](#drawbacks_alternatives_and-unknowns) for additional discussion.
+
+## Design
+
+The CTS design involves balancing ease of development with the need to build and
+run the CTS itself outside of the Fuchsia repository. The requirements are as
+follows:
+
+1. There should be a CTS test for every documented behavior of every platform
+   surface area element. Although we expect this to become a hard requirement
+   eventually, this RFC does not specify such a requirement.
+
+1. CTS tests may not rely on any internal details of a particular system image.
+   To the extent that they rely on other platform code, that code must be
+   bundled as part of the CTS and also not rely on any internal details of a
+   particular system image.
+
+1. CTS tests must be updated by developers (that is, tests must be added or
+   modified) when adding or changing elements of the platform surface area.
+
+1. It must be possible to determine the API level and ABI revision of Fuchsia
+   that a given CTS artifact targets.
+
+1. CTS tests that are not included in the test suite as prebuilt artifacts must
+   be written in languages supported by the SDK being used to test (see the
+   [supported languages document] and the [Language](#language) section below
+   for more details).
+
+### Authoring the Tests
+
+We develop CTS tests alongside their corresponding SDK elements. Today, that
+means we develop the tests in fuchsia.git. While it would be nice if CTS
+developers had the same experience as the out-of-tree developers who use the
+SDK, there are too many advantages to in-tree development to ignore:
+
+1. Because feature development is done alongside test development, in-tree
+   development of the tests will allow test authors to use a workflow with which
+   they are familiar, as well as submit the test in the same CL as the feature.
+
+1. Because the feature will be submitted at the same time as the test, there is
+   no need for any machinery to align the CTS and the version that it qualifies.
+
+CTS tests will use build-time enforcement to ensure that they can only depend
+on SDK elements or other pre-approved CTS code. One of the dangers of
+developing in-tree is that we may accidentally take on dependencies on platform
+implementation details that are not exposed via the SDK. CTS tests must only
+access publicly facing elements of the platform to prevent accidentally relying
+on implementation details. CTS tests may use platform code that is appropriate
+for writing tests (e.g., zxtest); such code will ship as part of the CTS
+artifact.
+
+CTS tests must not take dependencies on third party libraries that rely on the
+SDK for their Fuchsia support. Third party libraries that require SDK elements
+to support Fuchsia are going to be built against a particular SDK. We must make
+sure that our tests are as decoupled as possible from anyone else's SDK
+dependencies, as third party code may rely on platform features that we need to
+exclude from the tests. For example, if we rely on a test suite that heavily
+uses locking, it may be inappropriate for testing features of Zircon used to
+implement locking. Because of this restriction, we will use zxtest rather than
+gtest.
+
+An artifact containing the CTS tests relevant to a given SDK will be published
+alongside that SDK. This artifact will also contain build system support
+sufficient to build and run the CTS tests outside of the tree. It will not
+contain a toolchain.
+
+The tests must exercise language support thoroughly. See the section on
+[Language Support](#language) for more details.
+
+## Implementation
+
+### Coverage Requirements
+
+All updates to Fuchsia platform surface area elements should include tests that
+exercise the documented surface. This includes, but is not limited to, C/C++
+headers, FIDL API, the FIDL wire format, and any surface described by the
+[Fuchsia System Interface] document. If the surface area element can be
+accessed by developers via an SDK, it must be tested.
+
+We recognize that it may not be practical to require tests at this point. As
+the CTS and platform grow, we expect this requirement to become stricter.
+
+Almost all changes that require API review should have CTS tests, and API
+reviewers should review with that in mind. The final review will be made by
+testability reviewers, who should only approve platform surface area changes if
+they are appropriately covered by CTS tests.
+
+All tests are subject to the same review requirements as any other code
+submitted to the tree. Note that this does not mean that tests must be run as
+part of the commit queue, although we expect most will be. Examples of tests
+that might not be run as part of the commit queue include manual tests and tests
+that take longer than the commit queue allows.
+
+#### Directory structure
+
+The structure of the `//sdk/cts/tests` directory mirrors the structure of
+released SDKs. Tests go in the directory that mirrors the one where the
+interface under test is found in an SDK. For example:
+
+  * Tests for host tools should go in `//sdk/cts/tests/tools`
+  * Tests for FIDL interfaces should go in the appropriate
+    subdirectory of `//sdk/cts/tests/fidl`. For example, tests for
+    `fuchsia.sysmem` should go in `//sdk/cts/tests/fidl/fuchsia.sysmem`.
+  * Tests for libraries should go in the appropriate subdirectory of
+    `//sdk/cts/tests/pkg`. For example, tests for `async-loop` should go in
+    `//sdk/cts/tests/pkg/async-loop`.
+
+If Fuchsia developers are not clear on where to put their tests, they should
+consult the OWNERS of the relevant directory.
+
+#### Build support
+
+CTS tests target API and ABI that are available through externally-available
+SDKs. Build support ensures that tests only depend on API elements that are
+either available via an SDK, or allowlisted for use within the CTS. All build
+targets that are not allowlisted must use the `cts_` rule variants found in
+`//sdk/cts/build` instead of the standard fuchsia.git rules (i.e., use
+`cts_fuchsia_component`, `cts_executable`, and so on).
+
+The allowlist for non-SDK code can be found in
+`//sdk/cts/build/allowed_cts_deps.gni`. Test authors who believe they need an
+additional inclusion should reach out to the OWNERS of this directory.
+
+#### Language
+
+##### Target-side tests
+
+All API tests must be written in languages supported by the SDK they test. In
+most cases, this implies C++. ABI tests may be written in any language; to
+avoid having to build external support for languages the SDK does not support,
+an ABI test written in another language will be included as a prebuilt binary
+or package (whichever is more appropriate).
+
+Tests for particular headers must be written in a language that supports that
+header. As of this writing, C headers target C11 and C++11 and above, and C++
+headers target C++14 and above.
+
+CTS tests may restrict themselves to a particular language version. For
+example, we may decide that particular tests are restricted to C++14 in order to
+guarantee that headers maintain C++14 compatibility.
+
+##### Host-side tests
+
+Language restrictions for target-side tests are not applicable to host-side
+tests. The language for host-side tests is test-specific. If it will require
+the CTS to depend on a new toolchain, the decision should be made in
+consultation with the CTS team. For end-to-end tests and scripts that run on
+the host, as of this writing, we support the use of Dart (and, specifically,
+`sl4f`). As supported languages change, documentation will be made available
+about which languages are supported for host-side testing.
+
+#### Test Requirements
+
+Tests should contain a check for every documented assertion about a particular
+API or ABI. For example, if we have a class `fit::basic_string_view`, and it
+has a method `size` that is documented to return the size of the string_view, we
+would have a test that creates a string_view, calls the `size` method, and
+asserts that the return value is correct.
+
+We recognize that it may be difficult to do this in some cases, and that some
+tests may require specific device setup that may be hard to replicate. We
+recommend that developers start working on testing early in the development
+cycle. The long-term goal is to make CTS testing a requirement for all changes
+to the platform surface area.
+
+Tests should reflect best practices about the usage of a given API. Informally,
+if an end developer copies a test's usage of the API, the test author would
+believe that developer is using the API correctly. Tests should, to the extent
+possible, not depend on undocumented, application-specific invariants. In the
+future, in the case of widespread use of undocumented behaviors outside of the
+Fuchsia tree, we may need to add tests for use cases that do not follow
+recommended usages.
+
+Wherever possible, tests should avoid creating test doubles (e.g., mocks and
+fakes) for the internal state of the target device. The intent of the CTS is to
+make sure the entire device behaves correctly, not to make sure that a
+particular component behaves correctly in isolation.
+
+However, this does not mean that CTS tests cannot benefit from fakes in some
+environments.  For example, for the purposes of using CTS tests to ensure
+platform stability, we may find it useful to exercise tests that require real
+hardware or manual input, such as audio or connectivity tests, in an automated
+environment that does not have those features available.  While a CTS test
+itself should avoid the use of test doubles, the device under test can use fake
+drivers that feed the test fake data.  CTS tests can rely on such drivers in
+cases where using real hardware is not practical.
+
+If necessary, tests may require manual intervention to pass. We recommend that
+developers thoroughly investigate the possibility of automation.
+
+### Deployment
+
+CTS artifacts will be generated alongside the SDK artifacts that contain the
+relevant platform surface elements. Because of the soft transition requirements
+of RFC-0002, we expect that every SDK build will successfully execute the CTS
+associated with the previous build of the same SDK. As a proof of concept, we
+will implement infrastructure to guarantee this.
+
+CTS artifacts will contain a test harness and build rules for GN. They will not
+contain a build system or toolchain; this must be supplied in the test execution
+environment. We will document which toolchains are known to be compatible with
+a given CTS.
+
+### Examples
+
+Test examples can be found in fuchsia.git at `//sdk/cts/`.
+
+## Performance
+
+This change will have the following performance impact:
+
+ * An increase in time to run all platform tests, stemming from an increased
+   number of tests.
+ * No impact on production performance, because the changes are test-only.
+
+## Security considerations
+
+Because changes associated with this RFC are test-only, they are a low security
+risk. Tests are not expected to interact with untrusted data from external
+sources.
+
+## Privacy considerations
+
+Because changes associated with this RFC are test-only, they are a low privacy
+risk. Tests are not expected to interact with user data.
+
+## Testing
+
+This proposal will increase the testing matrix for the platform. For example,
+given the six-week ABI stability guarantee, all ABI tests from the CTS generated
+six weeks earlier than a given build should be run and complete successfully
+against that build.
+
+The new requirements in this proposal will also increase the overall number of
+platform tests.
+
+As many required properties of the test framework as is practical will be
+enforced automatically; for example, the framework will automatically check that
+only allowed dependencies are included.
+
+## Documentation
+
+Documentation on how to write CTS tests will be included in `//docs`. There
+will be updates to testability and API process documents to reflect new CTS test
+authorship requirements. The steps needed to run CTS out of tree will be
+documented, so that end developers and system integrators can do them
+independently.
+
+## Drawbacks, alternatives, and unknowns
+
+The chief drawback of this proposal is that it creates a significant new testing
+requirement for all changes to the platform surface area.
+
+It is not a goal of the CTS effort to provide a complete solution to evolution
+and backwards compatibility issues. APIs and ABIs will have to be designed
+carefully to ensure that developers can migrate their code at a reasonable cost.
+For example, the FIDL team evolves language bindings with extreme care: they
+have a [clear specification for how bindings ought to
+work](/docs/reference/fidl/language/bindings-spec.md), and actively [track how
+conformant the various bindings
+are](/docs/development/languages/fidl/guides/abi-api-compat.md).
+
+The CTS approach is a standard industry approach to maintaining backwards
+compatibility. Other approaches include:
+
+ * Simply being careful. We know empirically that this does not work by itself.
+ * Not evolving the platform. Obviously, simply never making changes is not
+   practical. Most scaled down versions of this (for example, shipping most of
+   an application's dependencies with it, or providing a virtual environment for
+   every application) are at odds with Fuchsia's design principles and product
+   goals.
+ * Formal verification. We do not consider formal verification to be a scalable
+   alternative to testing.
+
+## Prior art and references
+
+Android solves this problem by releasing a
+[CTS](https://source.android.com/compatibility/cts) with their product.
+Developers of new Android devices must ensure that their devices pass the CTS.
+
+As part of its [Windows Hardware Compatibility
+Program](https://docs.microsoft.com/en-us/windows-hardware/design/compatibility/),
+Microsoft produces a [Windows Hardware Lab
+Kit](https://docs.microsoft.com/en-us/windows-hardware/test/hlk/) that they
+distribute to developers of new Windows hardware.
+
+<!-- xrefs -->
+[RFC-0002]: /docs/contribute/governance/rfcs/0002_platform_versioning.md
+[supported languages document]: /docs/contribute/governance/policy/programming_languages.md
+[Fuchsia System Interface]: /docs/concepts/system/abi/system.md
+[Fuchsia language policy]: /docs/contribute/governance/policy/programming_languages.md
diff --git a/docs/contribute/governance/rfcs/OWNERS b/docs/contribute/governance/rfcs/OWNERS
index 92150c7..3382007 100644
--- a/docs/contribute/governance/rfcs/OWNERS
+++ b/docs/contribute/governance/rfcs/OWNERS
@@ -1,7 +1,7 @@
 # Fuchsia Eng Council
 
-abarth@google.com        # Chair
-cpu@google.com
-jamesr@google.com
-pascallouis@google.com
-vaas@google.com
+abarth@google.com        # Adam Barth (Chair)
+cpu@google.com           # Carlos Pizano 
+jamesr@google.com        # James Robinson
+pascallouis@google.com   # Pascal Perez
+vaas@google.com          # Vaas Krishnamurthy
diff --git a/docs/contribute/governance/rfcs/README.md b/docs/contribute/governance/rfcs/README.md
index 32c9523..30f7aa3 100644
--- a/docs/contribute/governance/rfcs/README.md
+++ b/docs/contribute/governance/rfcs/README.md
@@ -1,10 +1,13 @@
-# Fuchsia RFC process
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+
+# Fuchsia RFCs
 
 The Fuchsia RFC process is intended to provide a consistent and transparent path
 for making project-wide, technical decisions. For example, the RFC process can
 be used to evolve the project roadmap and the system architecture.
 
-The RFC process is detailed in [RFC-0001: Fuchsia Request for Comments process](0001_rfc_process.md), along with [RFC-0006: Addendum of the RFC process for Zircon](0006_addendum_to_rfc_process_for_zircon.md).
+The RFC process evolves over time, and can be read here in its
+[detailed current form](current_rfc_process.md). It is also summarized below.
 
 ## Summary of the process
 
@@ -12,8 +15,11 @@
 - Socialize your idea.
 - Draft your RFC using this [template](TEMPLATE.md).
 - Iterate your idea with appropriate stakeholders.
-- After stakeholders signoff, email eng-council@fuchsia.dev to prompt the Eng Council to decide whether to accept your RFC.
-- If your RFC is accepted, a member of the Eng Council will comment on your change stating that the RFC is accepted, will assign the RFC a number and mark your change Code-Review +2. Your RFC can now be landed.
+- After stakeholders sign off, email <eng-council@fuchsia.dev> to prompt the Eng
+  Council to decide whether to accept your RFC.
+- If your RFC is accepted, a member of the Eng Council will comment on your
+  change stating that the RFC is accepted, will assign the RFC a number and
+  mark your change Code-Review +2. Your RFC can now be landed.
 
 ## Criteria for requiring an RFC {#criteria}
 
@@ -30,11 +36,12 @@
 
 In addition, changes in the source directories:
 
-- /zircon
-- /src/zircon
-- /src/bringup
+- `/zircon`
+- `/src/zircon`
+- `/src/bringup`
 
-that meet the following criteria must use the RFC process as described in [RFC0006: Addendum of the RFC Process for Zircon](0006_addendum_to_rfc_process_for_zircon.md).
+that meet the following criteria must use the RFC process as described in
+[RFC-0006: Addendum of the RFC Process for Zircon](0006_addendum_to_rfc_process_for_zircon.md).
 
 - Adding or removing Zircon system interfaces.
 - Changing resource handling behaviors.
@@ -44,31 +51,72 @@
 - Adding or Downgrading support for a platform.
 - New build configurations.
 
-Note: Documents are sorted by date reviewed.
+## Process to submit an RFC
 
-## Active RFCs
+Once you are familiar with the RFC guidelines and are ready to send
+an RFC proposal for review, see [Creating an RFC](create_rfc.md).
+
+## Proposals
+
+### Active RFCs
 
 [Gerrit link](https://fuchsia-review.googlesource.com/q/dir:docs/contribute/governance/rfcs+is:open)
 
-## Accepted proposals
+### Finalized RFCs
 
-RFC                                                     | Submitted  | Reviewed   | Title
-------------------------------------------------------- | ---------- | ---------- | -----
-[RFC-0001](0001_rfc_process.md)                         | 2020-02-20 | 2020-02-27 | Fuchsia Request for Comments (RFC) process
-[RFC-0002](0002_platform_versioning.md)                 | 2020-03-30 | 2020-04-23 | Fuchsia Platform Versioning
-[RFC-0003](0003_logging.md)                             | 2020-06-03 | 2020-06-10 | Fuchsia Logging Guidelines
-[RFC-0004](0004_units_of_bytes.md)                      | 2020-06-09 | 2020-07-31 | Units of Bytes
-[RFC-0005](0005_blobfs_snapshots.md)                    | 2020-09-07 | 2020-09-19 | Blobfs Snapshots
-[RFC-0006](0006_addendum_to_rfc_process_for_zircon.md)  | 2020-08-17 | 2020-09-24 | Addendum of the RFC Process for Zircon
-[RFC-0007](0007_remove_thread_killing.md)               | 2020-09-25 | 2020-10-06 | Zircon Removal of Thread Killing
-[RFC-0008](0008_remove_zx_clock_get_and_adjust.md)      | 2020-10-21 | 2020-10-29 | Remove zx_clock_get and zx_clock_adjust
-[RFC-0009](0009_edge_triggered_async_wait.md)           | 2020-10-22 | 2020-11-04 | Edge triggered async_wait
-[RFC-0010](0010_channel_iovec.md)                       | 2020-10-01 | 2020-11-06 | zx_channel_iovec_t support for zx_channel_write and zx_channel_call
-[RFC-0011](0011_getinfo_kmemstats_extended.md)          | 2020-11-04 | 2020-11-20 | zx_object_get_info ZX_INFO_KMEM_STATS_EXTENDED
+<div class="form-checkbox">
+<devsite-expandable id="rfc-area">
+  <h4 class="showalways">RFC area</h4>
+<form id="filter-checkboxes-reset">
+  {% for area in areas %}
+    {% set found=false %}
+    {% for rfc in rfcs %}
+        {% for rfca in rfc.area %}
+          {% if rfca == area %}
+            {% set found=true %}
+          {% endif %}
+        {% endfor %}
+    {% endfor %}
+    {% if found %}
+      <div class="checkbox-div">
+        <input type="checkbox" id="checkbox-reset-{{ area }}" checked>
+        <label for="checkbox-reset-{{ area }}">{{ area }}</label>
+      </div>
+    {% endif %}
+  {% endfor %}
+  <br>
+  <br>
+  <button class="select-all">Select all</button>
+  <button class="clear-all">Clear all</button>
+  <hr>
+  <div class="see-rfcs">
+    <div class="rfc-left">
+      <p><a href="#accepted-rfc">Accepted RFCs</a></p>
+    </div>
+    <div class="rfc-right">
+      <p><a href="#rejected-rfc">Rejected RFCs</a></p>
+    </div>
+  </div>
+</form>
+</devsite-expandable>
 
-## Rejected proposals
+<a name="accepted-rfc"><h3 class="hide-from-toc">Accepted</h3></a>
+{% include "docs/contribute/governance/rfcs/_common/_index_table_header.md" %}
+{% for rfc in rfcs %}
+    {% if rfc.status == "Accepted" %}
+        {% include "docs/contribute/governance/rfcs/_common/_index_table_body.md" %}
+    {% endif %}
+{% endfor %}
+{% include "docs/contribute/governance/rfcs/_common/_index_table_footer.md" %}
 
-RFC      | Submitted | Reviewed | Title
--------- | --------- | -------- | ------
-_(none)_ | &nbsp;    | &nbsp;   | &nbsp;
+<a name="rejected-rfc"><h3 class="hide-from-toc">Rejected</h3></a>
+{% include "docs/contribute/governance/rfcs/_common/_index_table_header.md" %}
+{% for rfc in rfcs %}
+    {% if rfc.status == "Rejected" %}
+        {% include "docs/contribute/governance/rfcs/_common/_index_table_body.md" %}
+    {% endif %}
+{% endfor %}
+{% include "docs/contribute/governance/rfcs/_common/_index_table_footer.md" %}
 
+{# This div is used to close the filter that is initialized above #}
+</div>
diff --git a/docs/contribute/governance/rfcs/TEMPLATE.md b/docs/contribute/governance/rfcs/TEMPLATE.md
index 8916fa0..30b254d 100644
--- a/docs/contribute/governance/rfcs/TEMPLATE.md
+++ b/docs/contribute/governance/rfcs/TEMPLATE.md
@@ -1,14 +1,11 @@
-# Fuchsia RFC Template (RFC-NNNN)
+{% set rfcid = "Template" %}
+{% include "docs/contribute/governance/rfcs/_common/_rfc_header.md" %}
+# {{ rfc.name }} - {{ rfc.title }}
+<!-- *** DO NOT EDIT ABOVE THIS LINE -->
 
-One-sentence summary goes here.
-
-Field     | Value
-----------|--------------------------
-Status    | Draft
-Author(s) | *your names*
-Submitted | YYYY-MM-DD
-Reviewed  | *leave blank until reviewed*
-Issue     | *link to bugs.fuchsia.dev issue*
+<!--
+*** This should begin with an H2 element (for example, ## Summary).
+-->
 
 ## Summary
 
@@ -25,9 +22,9 @@
 ## Implementation
 
 How will you go about implementing this design? Can the change be made in a
-single CL or does the change involve a complex migration of third-party
-dependencies? Do you plan to structure the implementation into phases? What
-dependencies exist at each phase?
+single Gerrit change or does the change involve a complex migration of
+third-party dependencies? Do you plan to structure the implementation
+into phases? What dependencies exist at each phase?
 
 ## Performance
 
diff --git a/docs/contribute/governance/rfcs/_areas.yaml b/docs/contribute/governance/rfcs/_areas.yaml
new file mode 100644
index 0000000..47ffcd3
--- /dev/null
+++ b/docs/contribute/governance/rfcs/_areas.yaml
@@ -0,0 +1,39 @@
+# Copyright 2020 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+- Bringup
+- Camera
+- Cast
+- Chromium
+- Cobalt
+- Connectivity
+- Dart
+- Developer
+- Devices
+- Diagnostics
+- Factory
+- FIDL
+- Firmware
+- Flutter
+- Fonts
+- General
+- Governance
+- Graphics
+- HWinfo
+- Identity
+- Internationalization
+- lib
+- Media
+- Modular
+- Power
+- Recovery
+- Security
+- Session
+- Speech
+- Storage
+- System
+- Testing
+- UI
+- Virtualization
+- Zircon
diff --git a/docs/contribute/governance/rfcs/_common/_index_table_body.md b/docs/contribute/governance/rfcs/_common/_index_table_body.md
new file mode 100644
index 0000000..dbdad34
--- /dev/null
+++ b/docs/contribute/governance/rfcs/_common/_index_table_body.md
@@ -0,0 +1,21 @@
+  <tr>
+    <td><p>{{ rfc.name }}</p><h3 class="add-link" style="display:none">{{ rfc.name }} - {{ rfc.title }}</h3></td>
+    <td>
+        <p>
+          <a href="{{ rfc.file }}">{{ rfc.title }}</a>
+        </p>
+      </td>
+      <td>
+        <ul class="comma-list">
+        {% for area in rfc.area %}
+          <li>{{ area }}</li>
+        {% endfor %}
+        </ul>
+      <td>
+        <ul class="comma-list">
+        {% for change in rfc.gerrit_change_id %}
+          <li><a href="{{ gerrit_change_url }}{{ change }}">{{ change }}</a></li>
+        {% endfor %}
+        </ul>
+    </td>
+  </tr>
diff --git a/docs/contribute/governance/rfcs/_common/_index_table_footer.md b/docs/contribute/governance/rfcs/_common/_index_table_footer.md
new file mode 100644
index 0000000..7cf3e10
--- /dev/null
+++ b/docs/contribute/governance/rfcs/_common/_index_table_footer.md
@@ -0,0 +1,4 @@
+    </tbody>
+    </table>
+  </div>
+</devsite-filter>
diff --git a/docs/contribute/governance/rfcs/_common/_index_table_header.md b/docs/contribute/governance/rfcs/_common/_index_table_header.md
new file mode 100644
index 0000000..f7434db
--- /dev/null
+++ b/docs/contribute/governance/rfcs/_common/_index_table_header.md
@@ -0,0 +1,18 @@
+<devsite-filter checkbox-form-id="filter-checkboxes-reset" sortable="0">
+  <div>
+    <table class="fixed">
+      <colgroup>
+        <col width="10%">
+        <col width="60%">
+        <col width="15%">
+        <col width="15%">
+      </colgroup>
+      <thead>
+        <tr>
+          <th>RFC</th>
+          <th>Title</th>
+          <th>Area</th>
+          <th>Gerrit change</th>
+        </tr>
+      </thead>
+    <tbody class="list">
diff --git a/docs/contribute/governance/rfcs/_common/_rfc_header.md b/docs/contribute/governance/rfcs/_common/_rfc_header.md
new file mode 100644
index 0000000..f07776f
--- /dev/null
+++ b/docs/contribute/governance/rfcs/_common/_rfc_header.md
@@ -0,0 +1,115 @@
+{# This file is used to define the objects and css style for RFC pages #}
+{% set gerrit_profile = "https://fuchsia-review.googlesource.com/q/owner:" %}
+{% set gerrit_change_url = "https://fuchsia-review.googlesource.com/c/fuchsia/+/" %}
+{% set fuchsia_source_tree = "https://fuchsia.googlesource.com/fuchsia/+/master/" %}
+{% set fuchsia_editor = "https://ci.android.com/edit?repo=fuchsia/fuchsia/master&file=" %}
+{% set issue_url = "https://fxbug.dev/" %}
+{% set rfcs_dir = "docs/contribute/governance/rfcs/" %}
+{% set rfcs_metadata_file = "_rfcs.yaml" %}
+{% set eng_council_yaml_file = "_eng_council.yaml" %}
+{% set areas_yaml_file = "_areas.yaml" %}
+
+{% set rfcs | yamlloads %}
+{% include "docs/contribute/governance/rfcs/_rfcs.yaml" %}
+{% endset %}
+
+{% set areas | yamlloads %}
+{% include "docs/contribute/governance/rfcs/_areas.yaml" %}
+{% endset %}
+
+{% set eng_council | yamlloads %}
+{% include "docs/contribute/governance/rfcs/_eng_council.yaml" %}
+{% endset %}
+
+{% if rfcid %}
+    {% for _rfc in rfcs %}
+        {% if _rfc.name == rfcid %}
+            {% set rfc=_rfc %}
+            {% include "docs/contribute/governance/rfcs/_common/_rfc_metadata.md" %}
+            {% set found=true %}
+        {% endif %}
+    {% endfor %}
+    {% if not found %}
+      <h2> ERROR! Invalid RFC number: {{ rfcid }} </h2>
+      There must be an entry with "name: {{ rfcid }}" in file {{ rfcs_dir }}{{ rfcs_metadata_file }}
+    {% endif %}