[release] Snap to 5fe4f0fc79
Change-Id: I1472cc50620c00dcc7ecc77cc94fe9401edb5357
diff --git a/build/drivers/all_drivers_list.txt b/build/drivers/all_drivers_list.txt
index d93c6e3..0b4f78d 100644
--- a/build/drivers/all_drivers_list.txt
+++ b/build/drivers/all_drivers_list.txt
@@ -236,6 +236,8 @@
 //src/devices/tests/v2/interop:leaf_driver
 //src/devices/tests/v2/interop:root_driver
 //src/devices/tests/v2/interop:v1_driver
+//src/devices/tests/v2/runtime-dispatcher:leaf-driver
+//src/devices/tests/v2/runtime-dispatcher:root-driver
 //src/devices/tests/v2/services:root_driver
 //src/devices/thermal/drivers/aml-thermal-s905d2g-legacy:aml-thermal-s905d2g-legacy-driver
 //src/devices/thermal/drivers/aml-thermal-s905d2g:aml-thermal-s905d2g-driver
diff --git a/build/drivers/create_component_manifest.py b/build/drivers/create_component_manifest.py
index 83521d6..ec67df7 100644
--- a/build/drivers/create_component_manifest.py
+++ b/build/drivers/create_component_manifest.py
@@ -58,6 +58,12 @@
         nargs="*",
         help='A space separated list of composite parents',
     )
+    parser.add_argument(
+        '--default_dispatcher_opts',
+        nargs="*",
+        help=
+        'A space separated list of options for creating the default dispatcher',
+    )
 
     args = parser.parse_args()
 
@@ -155,6 +161,9 @@
             {'protocol': "fuchsia.scheduler.ProfileProvider"})
     if args.sysmem:
         manifest['use'].append({'protocol': "fuchsia.sysmem.Allocator"})
+    if args.default_dispatcher_opts:
+        manifest["program"][
+            "default_dispatcher_opts"] = args.default_dispatcher_opts
 
     json_manifest = json.dumps(manifest)
     args.output.write(json_manifest)
diff --git a/build/drivers/fuchsia_driver_component.gni b/build/drivers/fuchsia_driver_component.gni
index a6c65c6..0fcfca6 100644
--- a/build/drivers/fuchsia_driver_component.gni
+++ b/build/drivers/fuchsia_driver_component.gni
@@ -63,6 +63,11 @@
 #     nodes, and the driver's CML file will accept capabilities from each.
 #     Type: array of strings
 #     Default: []
+#   default_dispatcher_opts (optional)
+#     If this is set, then the default dispatcher for the driver will be
+#     created with these options.
+#     Type: array of strings
+#     Default: []
 #   info (mandatory for //src and //zircon)
 #     Name of the file containing the driver information file.
 #     Type: file
@@ -137,6 +142,13 @@
           args += [ "--composite" ]
           args += invoker.composite_nodes
         }
+
+        # TODO(fxbug.dev/99310): set allow_sync_calls for v1 drivers
+        # if they did not specify any opts.
+        if (defined(invoker.default_dispatcher_opts)) {
+          args += [ "--default_dispatcher_opts" ]
+          args += invoker.default_dispatcher_opts
+        }
       }
 
       full_manifest_target = "${target_name}_full_manifest"
diff --git a/build/images/network-conformance/assemble_network_conformance_system.gni b/build/images/network-conformance/assemble_network_conformance_system.gni
index b9150a0..23128c1 100644
--- a/build/images/network-conformance/assemble_network_conformance_system.gni
+++ b/build/images/network-conformance/assemble_network_conformance_system.gni
@@ -20,6 +20,11 @@
   core_realm("core") {
     testonly = true
 
+    deps = [
+      # Need ssh to test the system
+      "//src/developer/sshd-host:core_shard",
+    ]
+
     package_name = core_name
     restrict_persistent_storage = false
     restrict_full_resolver_to_base = false
diff --git a/build/images/recovery/BUILD.gn b/build/images/recovery/BUILD.gn
index db23420..8232238 100644
--- a/build/images/recovery/BUILD.gn
+++ b/build/images/recovery/BUILD.gn
@@ -6,7 +6,6 @@
 import("//build/images/vboot/vboot.gni")
 import("//build/images/zedboot/zedboot_args.gni")
 import("//build/testing/host_test_data.gni")
-import("//src/developer/sshd-host/fxbug_dev_90440.gni")
 import("//src/sys/core/build/core.gni")
 import("//src/sys/root/build/root.gni")
 
@@ -70,16 +69,24 @@
 
 core_name = "core-recovery"
 
+_core_realm_deps = [
+  "//src/connectivity/wlan/wlancfg:wlancfg-core-shard",
+  "//src/connectivity/wlan/wlandevicemonitor:wlandevicemonitor-core-shard",
+  "//src/connectivity/wlan/wlanstack:wlanstack-core-shard",
+  "//src/recovery/factory_reset:factory_reset_core_shard",
+  "//src/ui/bin/brightness_manager:auto-brightness_core_shard",
+]
+
 core_realm("core") {
   package_name = core_name
   restrict_persistent_storage = false
-  deps = [
-    "//src/connectivity/wlan/wlancfg:wlancfg-core-shard",
-    "//src/connectivity/wlan/wlandevicemonitor:wlandevicemonitor-core-shard",
-    "//src/connectivity/wlan/wlanstack:wlanstack-core-shard",
-    "//src/recovery/factory_reset:factory_reset_core_shard",
-    "//src/ui/bin/brightness_manager:auto-brightness_core_shard",
-  ]
+  deps = _core_realm_deps
+}
+
+core_realm("core-eng") {
+  package_name = core_name
+  restrict_persistent_storage = false
+  deps = _core_realm_deps + [ "//src/developer/sshd-host:core_shard" ]
 }
 
 root_realm("root") {
@@ -87,7 +94,6 @@
 }
 
 recovery_packages = [
-  ":core",
   "//build/info:build-info",
   "//garnet/bin/sysmgr",
   "//src/connectivity/location/regulatory_region:regulatory_region",
@@ -154,19 +160,9 @@
 
 assemble_system("recovery-eng") {
   forward_variables_from(recovery_base, "*")
-  base_packages = recovery_eng_packages
+  base_packages = recovery_eng_packages + [ ":core-eng" ]
   system_image_deps = recovery_system_image_deps
-
-  # TODO(fxbug.dev/90440): uncomment the below
-  #sysmgr_golden = "sysmgr-eng-golden.json"
-  # TODO(fxbug.dev/90440): delete the below
-  if (sshd_host_component == "cmx") {
-    sysmgr_golden = "sysmgr-eng-golden.json"
-  } else if (sshd_host_component == "cml") {
-    sysmgr_golden = "sysmgr-eng-golden-fxbug-dev-90440.json"
-  } else {
-    assert(false, "Invalid value sshd_host_component = $sshd_host_component")
-  }
+  sysmgr_golden = "sysmgr-eng-golden.json"
 
   # TODO(fxbug.dev/81569): Move this metadata to assemble_system.gni.
   metadata = {
@@ -196,7 +192,7 @@
 }
 
 assemble_system("recovery-fdr") {
-  base_packages = recovery_packages
+  base_packages = recovery_packages + [ ":core" ]
   generate_fvm = !bootfs_only
 
   system_image_deps = recovery_system_image_deps
@@ -242,19 +238,9 @@
 
 assemble_system("recovery-installer") {
   forward_variables_from(recovery_base, "*")
-  base_packages = recovery_installer_packages
+  base_packages = recovery_installer_packages + [ ":core-eng" ]
   system_image_deps = recovery_system_image_deps
-
-  # TODO(fxbug.dev/90440): uncomment the below
-  #sysmgr_golden = "sysmgr-installer-golden.json"
-  # TODO(fxbug.dev/90440): delete the below
-  if (sshd_host_component == "cmx") {
-    sysmgr_golden = "sysmgr-installer-golden.json"
-  } else if (sshd_host_component == "cml") {
-    sysmgr_golden = "sysmgr-installer-golden-fxbug-dev-90440.json"
-  } else {
-    assert(false, "Invalid value sshd_host_component = $sshd_host_component")
-  }
+  sysmgr_golden = "sysmgr-installer-golden.json"
 
   # The installer needs to see all partitions on the system so that it can
   #determine the installation source and destination
diff --git a/build/images/recovery/sysmgr-eng-golden-fxbug-dev-90440.json b/build/images/recovery/sysmgr-eng-golden-fxbug-dev-90440.json
deleted file mode 100644
index 861dc56..0000000
--- a/build/images/recovery/sysmgr-eng-golden-fxbug-dev-90440.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-    "apps": [
-        "fuchsia-pkg://fuchsia.com/system_recovery#meta/system_recovery.cmx",
-        "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx"
-    ],
-    "update_dependencies": [
-        "fuchsia.bogus.DoesNotExist"
-    ]
-}
diff --git a/build/images/recovery/sysmgr-eng-golden.json b/build/images/recovery/sysmgr-eng-golden.json
index 919bdf8..861dc56 100644
--- a/build/images/recovery/sysmgr-eng-golden.json
+++ b/build/images/recovery/sysmgr-eng-golden.json
@@ -1,8 +1,7 @@
 {
     "apps": [
         "fuchsia-pkg://fuchsia.com/system_recovery#meta/system_recovery.cmx",
-        "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx",
-        "fuchsia-pkg://fuchsia.com/sshd-host#meta/sshd-host.cmx"
+        "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx"
     ],
     "update_dependencies": [
         "fuchsia.bogus.DoesNotExist"
diff --git a/build/images/recovery/sysmgr-installer-golden-fxbug-dev-90440.json b/build/images/recovery/sysmgr-installer-golden-fxbug-dev-90440.json
deleted file mode 100644
index 3f0b42a..0000000
--- a/build/images/recovery/sysmgr-installer-golden-fxbug-dev-90440.json
+++ /dev/null
@@ -1,9 +0,0 @@
-{
-    "apps": [
-        "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx",
-        "fuchsia-pkg://fuchsia.com/system_recovery_installer#meta/system_recovery_installer.cmx"
-    ],
-    "update_dependencies": [
-        "fuchsia.bogus.DoesNotExist"
-    ]
-}
diff --git a/build/images/recovery/sysmgr-installer-golden.json b/build/images/recovery/sysmgr-installer-golden.json
index e0482c8..3f0b42a 100644
--- a/build/images/recovery/sysmgr-installer-golden.json
+++ b/build/images/recovery/sysmgr-installer-golden.json
@@ -1,7 +1,6 @@
 {
     "apps": [
         "fuchsia-pkg://fuchsia.com/timekeeper#meta/timekeeper.cmx",
-        "fuchsia-pkg://fuchsia.com/sshd-host#meta/sshd-host.cmx",
         "fuchsia-pkg://fuchsia.com/system_recovery_installer#meta/system_recovery_installer.cmx"
     ],
     "update_dependencies": [
diff --git a/build/images/size_checker/BUILD.gn b/build/images/size_checker/BUILD.gn
index 8a6f338..3958cf2 100644
--- a/build/images/size_checker/BUILD.gn
+++ b/build/images/size_checker/BUILD.gn
@@ -88,7 +88,10 @@
     _size_budgets_blobfs_file,
     _blobs_config,
   ]
-  outputs = [ "$target_out_dir/size_report_blobfs.json" ]
+  outputs = [
+    "$target_out_dir/size_report_blobfs.json",
+    "$target_out_dir/verbose_output_blobfs.json",
+  ]
   testonly = true
   args = [
     "--config",
@@ -103,6 +106,8 @@
     "deprecated_padded",
     "--gerrit-output",
     rebase_path("$target_out_dir/size_report_blobfs.json", root_build_dir),
+    "--verbose-json-output",
+    rebase_path("$target_out_dir/verbose_output_blobfs.json", root_build_dir),
   ]
 }
 
@@ -139,7 +144,10 @@
     _size_budgets_non_blobfs_file,
     "$root_build_dir/obj/build/images/fuchsia/update/update_package_manifest.json",
   ]
-  outputs = [ "$target_out_dir/size_report_non_blobfs.json" ]
+  outputs = [
+    "$target_out_dir/size_report_non_blobfs.json",
+    "$target_out_dir/verbose_output_non_blobfs.json",
+  ]
   testonly = true
   args = [
     "--config",
@@ -152,6 +160,21 @@
     "deprecated_padded",
     "--gerrit-output",
     rebase_path("$target_out_dir/size_report_non_blobfs.json", root_build_dir),
+    "--verbose-json-output",
+    rebase_path("$target_out_dir/verbose_output_non_blobfs.json",
+                root_build_dir),
+  ]
+}
+
+json_merge("verbose_output.json") {
+  sources = [
+    "$target_out_dir/verbose_output_blobfs.json",
+    "$target_out_dir/verbose_output_non_blobfs.json",
+  ]
+  testonly = true
+  deps = [
+    ":verify_size_budget_blobfs",
+    ":verify_size_budget_non_blobfs",
   ]
 }
 
@@ -162,6 +185,7 @@
   ]
   testonly = true
   deps = [
+    ":verbose_output.json",
     ":verify_size_budget_blobfs",
     ":verify_size_budget_non_blobfs",
   ]
diff --git a/examples/BUILD.gn b/examples/BUILD.gn
index ae7bbf2..977d00a 100644
--- a/examples/BUILD.gn
+++ b/examples/BUILD.gn
@@ -13,7 +13,6 @@
     "diagnostics",
     "drivers",
     "fidl",
-    "fortune",
     "fuzzers",
     "hello_world",
     "intl",
diff --git a/examples/assembly/structured_config/configured_by_assembly/BUILD.gn b/examples/assembly/structured_config/configured_by_assembly/BUILD.gn
index 93de6d2..4e36dc2 100644
--- a/examples/assembly/structured_config/configured_by_assembly/BUILD.gn
+++ b/examples/assembly/structured_config/configured_by_assembly/BUILD.gn
@@ -3,10 +3,17 @@
 # found in the LICENSE file.
 
 import("//build/components.gni")
+import("//build/rust/rustc_binary.gni")
+
+rustc_binary("bin") {
+  output_name = "to_configure"
+  sources = [ "src/main.rs" ]
+}
 
 fuchsia_component("to_configure") {
   manifest = "meta/to_configure.cml"
   restricted_features = [ "structured_config" ]
+  deps = [ ":bin" ]
 }
 
 # NOTE: this should not be included in build graphs except when product assembly is configured
diff --git a/examples/assembly/structured_config/configured_by_assembly/meta/to_configure.cml b/examples/assembly/structured_config/configured_by_assembly/meta/to_configure.cml
index 14876ad..36193669 100644
--- a/examples/assembly/structured_config/configured_by_assembly/meta/to_configure.cml
+++ b/examples/assembly/structured_config/configured_by_assembly/meta/to_configure.cml
@@ -2,6 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 {
+    // TODO(https://fxbug.dev/97805) remove this shard once the empty binary doesn't need it
+    include: [ "syslog/client.shard.cml" ],
+    program: {
+        runner: "elf",
+        binary: "bin/to_configure",
+    },
     config: {
         not_from_package: { type: "uint8" },
     },
diff --git a/examples/assembly/structured_config/configured_by_assembly/src/main.rs b/examples/assembly/structured_config/configured_by_assembly/src/main.rs
new file mode 100644
index 0000000..f080382
--- /dev/null
+++ b/examples/assembly/structured_config/configured_by_assembly/src/main.rs
@@ -0,0 +1,5 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+fn main() {}
diff --git a/examples/fortune/BUILD.gn b/examples/fortune/BUILD.gn
index 7a38683..32efc94 100644
--- a/examples/fortune/BUILD.gn
+++ b/examples/fortune/BUILD.gn
@@ -1,15 +1,9 @@
-# Copyright 2016 The Fuchsia Authors. All rights reserved.
+# Copyright 2022 The Fuchsia Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import("//build/components.gni")
+import("//build/packages/prebuilt_package.gni")
 
-executable("bin") {
-  output_name = "fortune"
-
-  sources = [ "fortune.c" ]
-}
-
-fuchsia_shell_package("fortune") {
-  deps = [ ":bin" ]
+prebuilt_package("fortune_teller") {
+  archive = "//prebuilt/sdk-samples/fortune-teller/fortune_teller"
 }
diff --git a/examples/fortune/README.md b/examples/fortune/README.md
deleted file mode 100644
index b128c59..0000000
--- a/examples/fortune/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Fortune
-
-A utility that prints aphorisms.
diff --git a/examples/fortune/fortune.c b/examples/fortune/fortune.c
deleted file mode 100644
index 31f2d1b..0000000
--- a/examples/fortune/fortune.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2016 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <time.h>
-
-static const size_t kFortunesSize = 4;
-static const char* kFortunes[4] = {
-    "If we have data, let’s look at data. If all we have are opinions, let’s "
-    "go\n"
-    "with mine. -- Jim Barksdale",
-    "Things that are impossible just take longer.",
-    "Better lucky than good.",
-    "Fortune favors the bold.",
-};
-
-int main(int argc, char** argv) {
-  srand((unsigned int)time(0));
-  printf("%s\n", kFortunes[rand() % kFortunesSize]);
-  return 0;
-}
diff --git a/products/common/workstation.gni b/products/common/workstation.gni
index fbc07fc..5a1d989 100644
--- a/products/common/workstation.gni
+++ b/products/common/workstation.gni
@@ -92,6 +92,7 @@
   "//src/session",
   "//src/ui/bin/terminal",
   "//src/ui/scenic",
+  "//examples/fortune:fortune_teller",
 
   # Starnix
   "//src/proc/bundles:starnix",
diff --git a/products/core.gni b/products/core.gni
index c31c335..05d44ec 100644
--- a/products/core.gni
+++ b/products/core.gni
@@ -36,6 +36,7 @@
   "//src/developer/memory/monitor:memory_monitor_core_shard",
   "//src/developer/remote-control:laboratory-core-shard",
   "//src/devices/bin/driver_playground:driver-playground-core-shard",
+  "//src/developer/sshd-host:core_shard",
   "//src/intl/intl_services:intl_services_small_shard",
   "//src/media/audio/audio_core/v2:core_shard",
   "//src/recovery/factory_reset:factory_reset_core_shard",
diff --git a/products/terminal.gni b/products/terminal.gni
index 883d184..c15eb3d 100644
--- a/products/terminal.gni
+++ b/products/terminal.gni
@@ -90,10 +90,3 @@
   # Only for products in which Flutter is supported.
   "//src/tests/intl/timezone:tests-for-flutter",
 ]
-
-# TODO(fxbug.dev/90440): delete the below after sshd-host.cml is used everywhere
-import("//src/developer/sshd-host/fxbug_dev_90440.gni")
-sshd_host_component = "cml"
-
-# TODO(fxbug.dev/90440): move this to core.gni
-core_realm_shards += [ "//src/developer/sshd-host:core_shard" ]
diff --git a/products/workstation_eng.gni b/products/workstation_eng.gni
index 6472a69..2a9e3a6 100644
--- a/products/workstation_eng.gni
+++ b/products/workstation_eng.gni
@@ -41,6 +41,7 @@
   "//src/session",
   "//src/ui/bin/terminal",
   "//src/ui/scenic",
+  "//examples/fortune:fortune_teller",
 
   # Starnix
   "//src/proc/bundles:starnix",
@@ -133,10 +134,3 @@
 
 # Use Fxfs for the data partition.
 data_filesystem_format = "fxfs"
-
-# TODO(fxbug.dev/90440): delete the below after sshd-host.cml is used everywhere
-import("//src/developer/sshd-host/fxbug_dev_90440.gni")
-sshd_host_component = "cml"
-
-# TODO(fxbug.dev/90440): move this to core.gni
-core_realm_shards += [ "//src/developer/sshd-host:core_shard" ]
diff --git a/sdk/fidl/fuchsia.driver.framework/driver_host.fidl b/sdk/fidl/fuchsia.driver.framework/driver_host.fidl
index bef6d88..5ada68b 100644
--- a/sdk/fidl/fuchsia.driver.framework/driver_host.fidl
+++ b/sdk/fidl/fuchsia.driver.framework/driver_host.fidl
@@ -34,6 +34,11 @@
     ///      specifying whether the driver should be colocated in the same
     ///      driver host as the driver that added |node|. If not specified, the
     ///      driver will be launched in a new driver host.
+    ///   3. "default_dispatcher_opts" (optional): an array of strings specifying
+    ///      the options for creating the default dispatcher. A string can be one
+    ///      of the following:
+    ///      * `allow_sync_calls`: allows synchronous calls to be done on the
+    ///      default dispatcher's thread.
     4: program fuchsia.data.Dictionary;
 
     /// Incoming namespace provided to the driver.
diff --git a/sdk/lib/dev-operation/dev-operation.api b/sdk/lib/dev-operation/dev-operation.api
index f622a375..d5ccbb0 100644
--- a/sdk/lib/dev-operation/dev-operation.api
+++ b/sdk/lib/dev-operation/dev-operation.api
@@ -1,10 +1,10 @@
 {
-  "pkg/dev-operation-experimental-driver-only/include/lib/operation/helpers/algorithm.h": "239bda5fb55dba9e1dec78fa6cd05686",
-  "pkg/dev-operation-experimental-driver-only/include/lib/operation/helpers/alloc_checker.h": "371f86aa54cfc5eaa8c6d4c5b3be26a3",
-  "pkg/dev-operation-experimental-driver-only/include/lib/operation/helpers/intrusive_container_node_utils.h": "9aa77e345522b1175144160db0ebf7f5",
-  "pkg/dev-operation-experimental-driver-only/include/lib/operation/helpers/intrusive_container_utils.h": "8c5677e0bd821faee59b652080170ea4",
-  "pkg/dev-operation-experimental-driver-only/include/lib/operation/helpers/intrusive_double_list.h": "688df494beda930ddd42143074dc9789",
-  "pkg/dev-operation-experimental-driver-only/include/lib/operation/helpers/intrusive_pointer_traits.h": "f5b8fec582c67587e179edfeb6d6e340",
-  "pkg/dev-operation-experimental-driver-only/include/lib/operation/helpers/macros.h": "1505271d3fd921b56797ad2d8a18d519",
-  "pkg/dev-operation-experimental-driver-only/include/lib/operation/operation.h": "e0ec1d96816228ab683da4124dde00ba"
-}
\ No newline at end of file
+  "pkg/dev-operation/include/lib/operation/helpers/algorithm.h": "239bda5fb55dba9e1dec78fa6cd05686",
+  "pkg/dev-operation/include/lib/operation/helpers/alloc_checker.h": "371f86aa54cfc5eaa8c6d4c5b3be26a3",
+  "pkg/dev-operation/include/lib/operation/helpers/intrusive_container_node_utils.h": "9aa77e345522b1175144160db0ebf7f5",
+  "pkg/dev-operation/include/lib/operation/helpers/intrusive_container_utils.h": "8c5677e0bd821faee59b652080170ea4",
+  "pkg/dev-operation/include/lib/operation/helpers/intrusive_double_list.h": "688df494beda930ddd42143074dc9789",
+  "pkg/dev-operation/include/lib/operation/helpers/intrusive_pointer_traits.h": "f5b8fec582c67587e179edfeb6d6e340",
+  "pkg/dev-operation/include/lib/operation/helpers/macros.h": "1505271d3fd921b56797ad2d8a18d519",
+  "pkg/dev-operation/include/lib/operation/operation.h": "e0ec1d96816228ab683da4124dde00ba"
+}
diff --git a/sdk/lib/driver2/start_args.h b/sdk/lib/driver2/start_args.h
index e59e763..2b769e2 100644
--- a/sdk/lib/driver2/start_args.h
+++ b/sdk/lib/driver2/start_args.h
@@ -10,6 +10,8 @@
 #include <fidl/fuchsia.driver.framework/cpp/wire.h>
 #include <lib/zx/status.h>
 
+#include <vector>
+
 namespace driver {
 
 template <typename T>
@@ -54,6 +56,29 @@
   return zx::error(ZX_ERR_NOT_FOUND);
 }
 
+// Returns the list of values for |key| as a vector of strings.
+inline zx::status<std::vector<std::string>> ProgramValueAsVector(
+    const fuchsia_data::wire::Dictionary& program, std::string_view key) {
+  if (program.has_entries()) {
+    for (auto& entry : program.entries()) {
+      if (!std::equal(key.begin(), key.end(), entry.key.begin())) {
+        continue;
+      }
+      if (!entry.value.is_str_vec()) {
+        return zx::error(ZX_ERR_WRONG_TYPE);
+      }
+      auto& values = entry.value.str_vec();
+      std::vector<std::string> result;
+      result.reserve(values.count());
+      for (auto& value : values) {
+        result.emplace_back(std::string{value.data(), value.size()});
+      }
+      return zx::ok(result);
+    }
+  }
+  return zx::error(ZX_ERR_NOT_FOUND);
+}
+
 inline zx::status<fidl::UnownedClientEnd<fuchsia_io::Directory>> NsValue(
     const fidl::VectorView<fuchsia_component_runner::wire::ComponentNamespaceEntry>& entries,
     std::string_view path) {
diff --git a/sdk/lib/driver2/start_args_test.cc b/sdk/lib/driver2/start_args_test.cc
index 46d005b..9ea6de4 100644
--- a/sdk/lib/driver2/start_args_test.cc
+++ b/sdk/lib/driver2/start_args_test.cc
@@ -40,6 +40,36 @@
   EXPECT_EQ(ZX_ERR_NOT_FOUND, driver::ProgramValue(empty_program, "").error_value());
 }
 
+TEST(StartArgsTest, ProgramValueAsVector) {
+  fidl::Arena arena;
+  fidl::VectorView<fdata::wire::DictionaryEntry> program_entries(arena, 2);
+  program_entries[0].key.Set(arena, "key-for-str");
+  program_entries[0].value = fdata::wire::DictionaryValue::WithStr(arena, "value-for-str");
+
+  fidl::StringView strs[]{
+      fidl::StringView{"test"},
+      fidl::StringView{"test2"},
+  };
+  program_entries[1].key.Set(arena, "key-for-strvec");
+  program_entries[1].value = fdata::wire::DictionaryValue::WithStrVec(
+      arena, fidl::VectorView<fidl::StringView>::FromExternal(strs));
+
+  fdata::wire::Dictionary program(arena);
+  program.set_entries(arena, std::move(program_entries));
+
+  auto values = driver::ProgramValueAsVector(program, "key-for-strvec");
+  EXPECT_EQ(2lu, values->size());
+  std::sort(values->begin(), values->end());
+  EXPECT_EQ("test", (*values)[0]);
+  EXPECT_EQ("test2", (*values)[1]);
+
+  EXPECT_EQ(ZX_ERR_WRONG_TYPE, driver::ProgramValueAsVector(program, "key-for-str").error_value());
+  EXPECT_EQ(ZX_ERR_NOT_FOUND, driver::ProgramValueAsVector(program, "key-unknown").error_value());
+
+  fdata::wire::Dictionary empty_program;
+  EXPECT_EQ(ZX_ERR_NOT_FOUND, driver::ProgramValueAsVector(empty_program, "").error_value());
+}
+
 TEST(StartArgsTest, NsValue) {
   auto endpoints = fidl::CreateEndpoints<fuchsia_io::Directory>();
   ASSERT_EQ(ZX_OK, endpoints.status_value());
diff --git a/sdk/lib/driver_runtime/include/lib/fdf/cpp/dispatcher.h b/sdk/lib/driver_runtime/include/lib/fdf/cpp/dispatcher.h
index 7b4d98d..a63cafd 100644
--- a/sdk/lib/driver_runtime/include/lib/fdf/cpp/dispatcher.h
+++ b/sdk/lib/driver_runtime/include/lib/fdf/cpp/dispatcher.h
@@ -153,7 +153,7 @@
   }
 
   // Returns the options set for this dispatcher.
-  std::optional<uint32_t> options(fdf_dispatcher_t* dispatcher) {
+  std::optional<uint32_t> options() const {
     return dispatcher_ ? std::optional(fdf_dispatcher_get_options(dispatcher_)) : std::nullopt;
   }
 
diff --git a/sdk/lib/usb/usb.api b/sdk/lib/usb/usb.api
index 32275f5..c80aa36 100644
--- a/sdk/lib/usb/usb.api
+++ b/sdk/lib/usb/usb.api
@@ -1,5 +1,5 @@
 {
-  "pkg/usb-experimental-driver-only/include/usb/request-cpp.h": "430cc3ee3fe49298fa8cb1c5438f4170",
-  "pkg/usb-experimental-driver-only/include/usb/usb-request.h": "308031d833e2d6391a321817dd960177",
-  "pkg/usb-experimental-driver-only/include/usb/usb.h": "f283c9b1ce16cc946e79b1916743d1d6"
-}
\ No newline at end of file
+  "pkg/usb/include/usb/request-cpp.h": "430cc3ee3fe49298fa8cb1c5438f4170",
+  "pkg/usb/include/usb/usb-request.h": "308031d833e2d6391a321817dd960177",
+  "pkg/usb/include/usb/usb.h": "f283c9b1ce16cc946e79b1916743d1d6"
+}
diff --git a/sdk/lib/zxc/zxc.api b/sdk/lib/zxc/zxc.api
index a9340e6..4a75ad2 100644
--- a/sdk/lib/zxc/zxc.api
+++ b/sdk/lib/zxc/zxc.api
@@ -1,6 +1,6 @@
 {
-  "pkg/zxc-experimental-driver-only/include/lib/fitx/internal/compiler.h": "3b050f1cec235f382982fbee11ad9a26",
-  "pkg/zxc-experimental-driver-only/include/lib/fitx/internal/result.h": "f56b1896a3f2e1f94d30700dc5083066",
-  "pkg/zxc-experimental-driver-only/include/lib/fitx/result.h": "914de85b14b2c20d12bc1a53d0033fd9",
-  "pkg/zxc-experimental-driver-only/include/lib/zx/status.h": "36bbdeaf15958596284fba6774e94ef2"
-}
\ No newline at end of file
+  "pkg/zxc/include/lib/fitx/internal/compiler.h": "3b050f1cec235f382982fbee11ad9a26",
+  "pkg/zxc/include/lib/fitx/internal/result.h": "f56b1896a3f2e1f94d30700dc5083066",
+  "pkg/zxc/include/lib/fitx/result.h": "914de85b14b2c20d12bc1a53d0033fd9",
+  "pkg/zxc/include/lib/zx/status.h": "36bbdeaf15958596284fba6774e94ef2"
+}
diff --git a/sdk/manifests/ddk.manifest b/sdk/manifests/ddk.manifest
index 310601c..c52d6e9 100644
--- a/sdk/manifests/ddk.manifest
+++ b/sdk/manifests/ddk.manifest
@@ -35,11 +35,11 @@
 sdk://pkg/driver_runtime_cpp
 sdk://pkg/fdio
 sdk://pkg/fidl
-sdk://pkg/fidl-llcpp-experimental-driver-only
+sdk://pkg/fidl-llcpp
 sdk://pkg/fidl_base
 sdk://pkg/fit
 sdk://pkg/fit-promise
-sdk://pkg/fitx-experimental-driver-only
+sdk://pkg/fitx
 sdk://pkg/hwreg
 sdk://pkg/mmio
 sdk://pkg/mmio-ptr
@@ -50,11 +50,11 @@
 sdk://pkg/syslog_structured_backend
 sdk://pkg/trace-engine
 sdk://pkg/zx
-sdk://pkg/zx-experimental-driver-only
-sdk://pkg/zxc-experimental-driver-only
+sdk://pkg/zx-status
+sdk://pkg/zxc
 sdk://tools/arm64/bindc
 sdk://tools/arm64/fidlgen_banjo
 sdk://tools/arm64/fidlgen_llcpp_experimental_driver_only_toolchain
 sdk://tools/x64/bindc
 sdk://tools/x64/fidlgen_banjo
-sdk://tools/x64/fidlgen_llcpp_experimental_driver_only_toolchain
+sdk://tools/x64/fidlgen_llcpp_experimental_driver_only_toolchain
\ No newline at end of file
diff --git a/src/bringup/bin/netsvc/test/integration/src/lib.rs b/src/bringup/bin/netsvc/test/integration/src/lib.rs
index 6bc9210..45272b6 100644
--- a/src/bringup/bin/netsvc/test/integration/src/lib.rs
+++ b/src/bringup/bin/netsvc/test/integration/src/lib.rs
@@ -398,8 +398,9 @@
     (realm, fs)
 }
 
-async fn with_netsvc_and_netstack_bind_port<E, F, Fut, A, V>(
+async fn with_netsvc_and_netstack_bind_port<E, F, Fut, A, V, P>(
     port: u16,
+    avoid_ports: P,
     name: &str,
     args: V,
     test: F,
@@ -409,6 +410,7 @@
     A: Into<String>,
     V: IntoIterator<Item = A>,
     E: netemul::Endpoint,
+    P: IntoIterator<Item = u16>,
 {
     let netsvc_name = format!("{}-netsvc", name);
     let ns_name = format!("{}-netstack", name);
@@ -453,6 +455,26 @@
     .await
     .expect("wait ll address");
 
+    // Bind to the specified ports to avoid later binding to an unspecified port
+    // that ends up matching these. Used by tests to avoid receiving unexpected
+    // traffic.
+    let _captive_ports_socks = futures::stream::iter(avoid_ports.into_iter())
+        .then(|port| {
+            fuchsia_async::net::UdpSocket::bind_in_realm(
+                &netstack_realm,
+                std::net::SocketAddrV6::new(
+                    std::net::Ipv6Addr::UNSPECIFIED,
+                    port,
+                    /* flowinfo */ 0,
+                    /* scope id */ 0,
+                )
+                .into(),
+            )
+            .map(move |r| r.unwrap_or_else(|e| panic!("bind in realm with {port}: {:?}", e)))
+        })
+        .collect::<Vec<_>>()
+        .await;
+
     let sock = fuchsia_async::net::UdpSocket::bind_in_realm(
         &netstack_realm,
         std::net::SocketAddrV6::new(
@@ -490,8 +512,10 @@
     Fut: futures::Future<Output = ()>,
     E: netemul::Endpoint,
 {
-    with_netsvc_and_netstack_bind_port::<E, _, _, _, _>(
+    with_netsvc_and_netstack_bind_port::<E, _, _, _, _, _>(
         /* unspecified port */ 0,
+        // Avoid the multicast ports, which would otherwise cause test flakes.
+        [debuglog::MULTICAST_PORT.get(), netboot::ADVERT_PORT.get()],
         name,
         DEFAULT_NETSVC_ARGS,
         test,
@@ -716,8 +740,9 @@
 
 #[variants_test]
 async fn debuglog<E: netemul::Endpoint>(name: &str) {
-    with_netsvc_and_netstack_bind_port::<E, _, _, _, _>(
+    with_netsvc_and_netstack_bind_port::<E, _, _, _, _, _>(
         debuglog::MULTICAST_PORT.get(),
+        [],
         name,
         DEFAULT_NETSVC_ARGS,
         debuglog_inner,
@@ -1014,8 +1039,9 @@
 
 #[variants_test]
 async fn advertises<E: netemul::Endpoint>(name: &str) {
-    let () = with_netsvc_and_netstack_bind_port::<E, _, _, _, _>(
+    let () = with_netsvc_and_netstack_bind_port::<E, _, _, _, _, _>(
         netsvc_proto::netboot::ADVERT_PORT.get(),
+        [],
         name,
         IntoIterator::into_iter(DEFAULT_NETSVC_ARGS).chain(["--advertise"]),
         |sock, scope| async move {
diff --git a/src/connectivity/wlan/drivers/third_party/broadcom/brcmfmac/sdio/bcmsdh.cc b/src/connectivity/wlan/drivers/third_party/broadcom/brcmfmac/sdio/bcmsdh.cc
index f5aa4fa..b8685cc 100644
--- a/src/connectivity/wlan/drivers/third_party/broadcom/brcmfmac/sdio/bcmsdh.cc
+++ b/src/connectivity/wlan/drivers/third_party/broadcom/brcmfmac/sdio/bcmsdh.cc
@@ -727,17 +727,34 @@
 }
 #endif  // POWER_MANAGEMENT
 
+static zx_status_t brcmf_sdiod_set_block_size(sdio_protocol_t* proto, sdio_func* func,
+                                              uint16_t block_size) {
+  zx_status_t ret = sdio_update_block_size(proto, block_size, false);
+  if (ret != ZX_OK) {
+    BRCMF_ERR("Failed to update block size: %s", zx_status_get_string(ret));
+    return ret;
+  }
+  // Cache the block size so we don't have to request it every time we do alignment calculations.
+  ret = sdio_get_block_size(proto, &func->blocksize);
+  if (ret != ZX_OK) {
+    BRCMF_ERR("Failed to get block size: %s", zx_status_get_string(ret));
+    return ret;
+  }
+  ZX_ASSERT(func->blocksize == block_size);
+  return ZX_OK;
+}
+
 static zx_status_t brcmf_sdiod_probe(struct brcmf_sdio_dev* sdiodev) {
   zx_status_t ret = ZX_OK;
 
-  ret = sdio_update_block_size(&sdiodev->sdio_proto_fn1, SDIO_FUNC1_BLOCKSIZE, false);
+  ret = brcmf_sdiod_set_block_size(&sdiodev->sdio_proto_fn1, sdiodev->func1, SDIO_FUNC1_BLOCKSIZE);
   if (ret != ZX_OK) {
-    BRCMF_ERR("Failed to set F1 blocksize");
+    BRCMF_ERR("Failed to set F1 blocksize: %s", zx_status_get_string(ret));
     goto out;
   }
-  ret = sdio_update_block_size(&sdiodev->sdio_proto_fn2, SDIO_FUNC2_BLOCKSIZE, false);
+  ret = brcmf_sdiod_set_block_size(&sdiodev->sdio_proto_fn2, sdiodev->func2, SDIO_FUNC2_BLOCKSIZE);
   if (ret != ZX_OK) {
-    BRCMF_ERR("Failed to set F2 blocksize");
+    BRCMF_ERR("Failed to set F2 blocksize: %s", zx_status_get_string(ret));
     goto out;
   }
 
@@ -872,7 +889,6 @@
       err = ZX_ERR_INTERNAL;
       goto fail;
     }
-    sdio_get_block_size(&sdio_proto_fn1, &func1->blocksize);
   }
   func2 = static_cast<decltype(func2)>(calloc(1, sizeof(struct sdio_func)));
   if (!func2) {
@@ -885,7 +901,6 @@
       err = ZX_ERR_INTERNAL;
       goto fail;
     }
-    sdio_get_block_size(&sdio_proto_fn2, &func2->blocksize);
   }
   sdiodev = new brcmf_sdio_dev{};
   if (!sdiodev) {
diff --git a/src/developer/ffx/plugins/assembly/src/args.rs b/src/developer/ffx/plugins/assembly/src/args.rs
index 2abe146..b0f528b 100644
--- a/src/developer/ffx/plugins/assembly/src/args.rs
+++ b/src/developer/ffx/plugins/assembly/src/args.rs
@@ -184,6 +184,9 @@
     /// regardless of whether the component exceeded its budget.
     #[argh(switch, short = 'v')]
     pub verbose: bool,
+    /// path where to write the verbose JSON output.
+    #[argh(option)]
+    pub verbose_json_output: Option<PathBuf>,
 }
 
 fn default_blobfs_layout() -> BlobFSLayout {
diff --git a/src/developer/ffx/plugins/assembly/src/operations/size_check.rs b/src/developer/ffx/plugins/assembly/src/operations/size_check.rs
index de33ab7..292b24e 100644
--- a/src/developer/ffx/plugins/assembly/src/operations/size_check.rs
+++ b/src/developer/ffx/plugins/assembly/src/operations/size_check.rs
@@ -115,7 +115,15 @@
     /// Number of bytes used by the packages this budget applies to.
     pub used_bytes: u64,
     /// Breakdown of storage consumption by package.
-    pub package_breakdown: HashMap<PathBuf, u64>,
+    pub package_breakdown: HashMap<PathBuf, PackageResult>,
+}
+
+#[derive(Debug, Serialize, Eq, PartialEq)]
+struct PackageResult {
+    /// Size of the package
+    pub size: u64,
+    /// Blobs in this package and their proportional size
+    blobs: HashMap<Hash, u64>,
 }
 
 /// Verifies that no budget is exceeded.
@@ -179,6 +187,12 @@
         }
     }
 
+    if let Some(verbose_json_output) = args.verbose_json_output {
+        let output: HashMap<&str, &BudgetResult> =
+            results.iter().map(|v| (v.name.as_str(), v)).collect();
+        write_json_file(&verbose_json_output, &output)?;
+    }
+
     // Print a text report for each overrun budget.
     let over_budget = results.iter().filter(|e| e.used_bytes > e.budget_bytes).count();
 
@@ -210,7 +224,7 @@
                     key.file_name()
                         .and_then(|name| name.to_str())
                         .ok_or(format_err!("Can't extract file name from path {:?}", key))?,
-                    value
+                    value.size,
                 );
             }
         }
@@ -415,13 +429,17 @@
 
         let mut package_breakdown = HashMap::new();
         for blob in filtered_blobs {
-            let count = &blob_count_by_hash.get(&blob.hash).ok_or(format_err!(
+            let count = blob_count_by_hash.get(&blob.hash).ok_or(format_err!(
                 "Can't find blob {} from package {:?} in map",
                 blob.hash,
                 blob.package
             ))?;
-            let package_size = package_breakdown.entry(blob.package.clone()).or_insert(0);
-            *package_size += count.size / count.share_count;
+            let package_result = package_breakdown
+                .entry(blob.package.clone())
+                .or_insert(PackageResult { size: 0, blobs: HashMap::new() });
+            package_result.size += count.size / count.share_count;
+            let blob_size = package_result.blobs.entry(blob.hash).or_insert(0);
+            *blob_size += count.size / count.share_count;
         }
 
         result.push(BudgetResult {
@@ -458,7 +476,7 @@
 mod tests {
     use crate::operations::size_check::{
         compute_budget_results, verify_budgets_with_tools, BlobInstance, BlobSizeAndCount,
-        BudgetBlobs, BudgetConfig, BudgetResult,
+        BudgetBlobs, BudgetConfig, BudgetResult, PackageResult,
     };
     use crate::util::read_config;
     use crate::util::write_json_file;
@@ -545,6 +563,7 @@
                 blob_sizes: [test_fs.path("blobs.json")].to_vec(),
                 gerrit_output: None,
                 verbose: false,
+                verbose_json_output: None,
             },
             Box::new(FakeToolProvider::default()),
         );
@@ -562,6 +581,7 @@
                 blob_sizes: [test_fs.path("blobs.json")].to_vec(),
                 gerrit_output: None,
                 verbose: false,
+                verbose_json_output: None,
             },
             Box::new(FakeToolProvider::default()),
         );
@@ -609,6 +629,7 @@
                 blob_sizes: [test_fs.path("blobs.json")].to_vec(),
                 gerrit_output: None,
                 verbose: false,
+                verbose_json_output: None,
             },
             Box::new(FakeToolProvider::default()),
         );
@@ -656,6 +677,7 @@
                 blob_sizes: [test_fs.path("blobs.json")].to_vec(),
                 gerrit_output: None,
                 verbose: false,
+                verbose_json_output: None,
             },
             Box::new(FakeToolProvider::default()),
         );
@@ -703,6 +725,7 @@
                 blob_sizes: [test_fs.path("blobs.json")].to_vec(),
                 gerrit_output: None,
                 verbose: false,
+                verbose_json_output: None,
             },
             Box::new(FakeToolProvider::default()),
         )
@@ -740,6 +763,7 @@
                 blob_sizes: [test_fs.path("blobs.json")].to_vec(),
                 gerrit_output: None,
                 verbose: false,
+                verbose_json_output: None,
             },
             Box::new(FakeToolProvider::default()),
         );
@@ -777,6 +801,7 @@
                 blob_sizes: [test_fs.path("blobs.json")].to_vec(),
                 gerrit_output: None,
                 verbose: false,
+                verbose_json_output: None,
             },
             Box::new(FakeToolProvider::default()),
         )
@@ -859,6 +884,7 @@
                 blob_sizes: [test_fs.path("blobs.json")].to_vec(),
                 gerrit_output: Some(test_fs.path("output.json")),
                 verbose: false,
+                verbose_json_output: None,
             },
             Box::new(FakeToolProvider::default()),
         );
@@ -933,6 +959,7 @@
                 blob_sizes: [test_fs.path("blobs.json")].to_vec(),
                 gerrit_output: Some(test_fs.path("output.json")),
                 verbose: false,
+                verbose_json_output: None,
             },
             Box::new(FakeToolProvider::default()),
         )
@@ -1037,6 +1064,7 @@
                 blob_sizes: [test_fs.path("blobs.json")].to_vec(),
                 gerrit_output: Some(test_fs.path("output.json")),
                 verbose: false,
+                verbose_json_output: Some(test_fs.path("verbose-output.json")),
             },
             Box::new(FakeToolProvider::default()),
         )
@@ -1055,6 +1083,46 @@
                 "Software Deliver.owner": "http://go/fuchsia-size-stats/single_component/?f=component%3Ain%3ASoftware+Deliver"
             }),
         );
+        test_fs.assert_eq(
+            "verbose-output.json",
+            json!({
+                "Software Deliver": {
+                  "name": "Software Deliver",
+                  "budget_bytes": 7497932,
+                  "creep_budget_bytes": 1,
+                  "used_bytes": 106,
+                  "package_breakdown": {
+                    test_fs.path("obj/src/sys/pkg/bin/pkg-cache/pkg-cache/package_manifest.json").to_str().unwrap(): {
+                      "size": 53,
+                      "blobs": {
+                        "0e56473237b6b2ce39358c11a0fbd2f89902f246d966898d7d787c9025124d51": 53
+                      }
+                    },
+                    test_fs.path("obj/src/sys/pkg/bin/pkgfs/pkgfs/package_manifest.json").to_str().unwrap(): {
+                      "size": 53,
+                      "blobs": {
+                        "0e56473237b6b2ce39358c11a0fbd2f89902f246d966898d7d787c9025124d51": 53
+                      }
+                    }
+                  }
+                },
+                "Connectivity": {
+                  "name": "Connectivity",
+                  "budget_bytes": 10884219,
+                  "creep_budget_bytes": 1,
+                  "used_bytes": 53,
+                  "package_breakdown": {
+                    test_fs.path("obj/src/connectivity/bluetooth/core/bt-gap/bt-gap/package_manifest.json").to_str().unwrap(): {
+                      "size": 53,
+                      "blobs": {
+                        "0e56473237b6b2ce39358c11a0fbd2f89902f246d966898d7d787c9025124d51": 53
+                      }
+                    }
+                  }
+                }
+              }
+              )
+        );
     }
 
     #[test]
@@ -1098,6 +1166,7 @@
                 blob_sizes: [test_fs.path("blobs.json")].to_vec(),
                 gerrit_output: Some(test_fs.path("output.json")),
                 verbose: false,
+                verbose_json_output: None,
             },
             Box::new(FakeToolProvider::default()),
         );
@@ -1177,6 +1246,7 @@
                 blob_sizes: [test_fs.path("blobs.json")].to_vec(),
                 gerrit_output: Some(test_fs.path("output.json")),
                 verbose: false,
+                verbose_json_output: None,
             },
             Box::new(FakeToolProvider::default()),
         )
@@ -1263,6 +1333,7 @@
                 blob_sizes: [test_fs.path("blobs1.json")].to_vec(),
                 gerrit_output: Some(test_fs.path("output.json")),
                 verbose: false,
+                verbose_json_output: None,
             },
             tool_provider,
         )
@@ -1359,8 +1430,23 @@
                 creep_budget_bytes: 3245,
                 used_bytes: 115,
                 package_breakdown: HashMap::from([
-                    (package2_path.clone(), 70 /* 90/2 + 50/2 */),
-                    (package1_path.clone(), 45 /* 90/2 */),
+                    (
+                        package2_path.clone(),
+                        PackageResult {
+                            size: 70, /* 90/2 + 50/2 */
+                            blobs: HashMap::from([
+                                (blob1_hash.clone(), 45),
+                                (blob2_hash.clone(), 25),
+                            ]),
+                        },
+                    ),
+                    (
+                        package1_path.clone(),
+                        PackageResult {
+                            size: 45, /* 90/2 */
+                            blobs: HashMap::from([(blob1_hash.clone(), 45)]),
+                        },
+                    ),
                 ]),
             },
             BudgetResult {
@@ -1368,7 +1454,13 @@
                 budget_bytes: 456,
                 creep_budget_bytes: 111,
                 used_bytes: 31, /* 25 + 6 */
-                package_breakdown: HashMap::from([(package3_path.clone(), 25 /* 50/2 */)]),
+                package_breakdown: HashMap::from([(
+                    package3_path.clone(),
+                    PackageResult {
+                        size: 25, /* 50/2 */
+                        blobs: HashMap::from([(blob2_hash.clone(), 25)]),
+                    },
+                )]),
             },
         ];
         assert_eq!(results, expected_result);
diff --git a/src/developer/sshd-host/BUILD.gn b/src/developer/sshd-host/BUILD.gn
index fc1ff02..e0d0c05 100644
--- a/src/developer/sshd-host/BUILD.gn
+++ b/src/developer/sshd-host/BUILD.gn
@@ -5,7 +5,6 @@
 import("//build/components.gni")
 import("//build/config.gni")
 import("//src/sys/core/build/core_shard.gni")
-import("fxbug_dev_90440.gni")
 
 group("tests") {
   testonly = true
@@ -42,48 +41,20 @@
   ]
 }
 
-fuchsia_component("cmx") {
-  component_name = "sshd-host"
-  manifest = "meta/sshd-host.cmx"
-  deps = [ ":bin" ]
-}
-
-fuchsia_component("cml") {
-  component_name = "sshd-host"
+fuchsia_package_with_single_component("sshd-host") {
   manifest = "meta/sshd-host.cml"
-  deps = [ ":bin" ]
-
-  # TODO(fxbug.dev/97903): remove the below after removing svc_from_sys from
-  # sshd-host.cml.
-  check_includes = false
-}
-
-fuchsia_package("sshd-host") {
   deps = [
     # Ensure this isn't included in production
     "//build/validate:non_production_tag",
 
     # Used by programs that need to load ICU locale data, such as tz-util.
+    ":bin",
     "//src/intl:icudtl",
     "//third_party/openssh-portable:hostkeygen",
     "//third_party/openssh-portable:scp",
     "//third_party/openssh-portable:sftp-server",
     "//third_party/openssh-portable:sshd",
   ]
-
-  # TODO(fxbug.dev/90440): delete the below after we use cml everywhere
-  if (sshd_host_component == "cmx") {
-    deps += [
-      ":cmx",
-
-      # Configure sysmgr to start sshd-host.cmx
-      "//src/sys/sysmgr:sshd_host_config",
-    ]
-  } else if (sshd_host_component == "cml") {
-    deps += [ ":cml" ]
-  } else {
-    assert(false, "Invalid value sshd_host_component = $sshd_host_component")
-  }
 }
 
 core_shard("core_shard") {
diff --git a/src/developer/sshd-host/fxbug_dev_90440.gni b/src/developer/sshd-host/fxbug_dev_90440.gni
deleted file mode 100644
index acea593..0000000
--- a/src/developer/sshd-host/fxbug_dev_90440.gni
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright 2022 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# TODO(fxbug.dev/90440): delete ths file after we use cml everywhere.
-
-declare_args() {
-  # Use sshd_host.cmx or sshd_host.cml.
-  # Valid values: "cmx", "cml"
-  sshd_host_component = "cmx"
-}
diff --git a/src/developer/sshd-host/meta/sshd-host.cmx b/src/developer/sshd-host/meta/sshd-host.cmx
deleted file mode 100644
index ae9d81b..0000000
--- a/src/developer/sshd-host/meta/sshd-host.cmx
+++ /dev/null
@@ -1,22 +0,0 @@
-{
-    "include": [
-        "syslog/client.shard.cmx"
-    ],
-    "program": {
-        "binary": "bin/sshd-host"
-    },
-    "sandbox": {
-        "features": [
-            "build-info",
-            "config-data",
-            "deprecated-shell"
-        ],
-        "services": [
-            "fuchsia.boot.Items",
-            "fuchsia.hardware.pty.Device",
-            "fuchsia.net.name.Lookup",
-            "fuchsia.posix.socket.Provider",
-            "fuchsia.process.Launcher"
-        ]
-    }
-}
diff --git a/src/devices/bin/driver_host2/driver_host.cc b/src/devices/bin/driver_host2/driver_host.cc
index 47a47a7..c0a5586 100644
--- a/src/devices/bin/driver_host2/driver_host.cc
+++ b/src/devices/bin/driver_host2/driver_host.cc
@@ -138,6 +138,22 @@
   return status;
 }
 
+uint32_t DriverHost::ExtractDefaultDispatcherOpts(const fuchsia_data::wire::Dictionary& program) {
+  auto default_dispatcher_opts = driver::ProgramValueAsVector(program, "default_dispatcher_opts");
+
+  uint32_t opts = 0;
+  if (default_dispatcher_opts.is_ok()) {
+    for (auto opt : *default_dispatcher_opts) {
+      if (opt == "allow_sync_calls") {
+        opts |= FDF_DISPATCHER_OPTION_ALLOW_SYNC_CALLS;
+      } else {
+        LOGF(WARNING, "Ignoring unknown default_dispatcher_opt: %s", opt.c_str());
+      }
+    }
+  }
+  return opts;
+}
+
 void DriverHost::Start(StartRequestView request, StartCompleter::Sync& completer) {
   if (!request->start_args.has_url()) {
     LOGF(ERROR, "Failed to start driver, missing 'url' argument");
@@ -204,6 +220,8 @@
     return;
   }
 
+  uint32_t default_dispatcher_opts = ExtractDefaultDispatcherOpts(request->start_args.program());
+
   // Once we receive the VMO from the call to GetBuffer, we can load the driver
   // into this driver host. We move the storage and encoded for start_args into
   // this callback to extend its lifetime.
@@ -211,7 +229,8 @@
                               std::make_unique<FileEventHandler>(url));
   auto callback = [this, request = std::move(request->driver), completer = completer.ToAsync(),
                    url = std::move(url), converted_message = std::move(converted_message),
-                   wire_format_metadata, _ = file.Clone()](
+                   wire_format_metadata,
+                   default_dispatcher_opts = std::move(default_dispatcher_opts), _ = file.Clone()](
                       fidl::WireUnownedResult<fio::File::GetBackingMemory>& result) mutable {
     if (!result.ok()) {
       LOGF(ERROR, "Failed to start driver '%s', could not get library VMO: %s", url.data(),
@@ -263,18 +282,18 @@
       //
       // We do not destroy the dispatcher in the shutdown callback, to prevent crashes that
       // would happen if the driver attempts to access the dispatcher in its Stop hook.
-      uint32_t options = 0;
+      uint32_t options = default_dispatcher_opts;
 
       // TODO(fxbug.dev/99310): When we can parse CMLs to get this information,
       // please delete these.
       if (url == "fuchsia-boot:///#meta/intel-i2c-dfv2.cm") {
-        options = FDF_DISPATCHER_OPTION_ALLOW_SYNC_CALLS;
+        options |= FDF_DISPATCHER_OPTION_ALLOW_SYNC_CALLS;
       }
       if (url == "fuchsia-boot:///#meta/i2c.cm") {
-        options = FDF_DISPATCHER_OPTION_ALLOW_SYNC_CALLS;
+        options |= FDF_DISPATCHER_OPTION_ALLOW_SYNC_CALLS;
       }
       if (url == "fuchsia-boot:///#meta/i2c-hid-dfv2.cm") {
-        options = FDF_DISPATCHER_OPTION_ALLOW_SYNC_CALLS;
+        options |= FDF_DISPATCHER_OPTION_ALLOW_SYNC_CALLS;
       }
       auto dispatcher =
           fdf::Dispatcher::Create(options, [driver_ref = *driver](fdf_dispatcher_t* dispatcher) {});
diff --git a/src/devices/bin/driver_host2/driver_host.h b/src/devices/bin/driver_host2/driver_host.h
index e69a1b4..9264c47 100644
--- a/src/devices/bin/driver_host2/driver_host.h
+++ b/src/devices/bin/driver_host2/driver_host.h
@@ -68,6 +68,11 @@
   void GetProcessKoid(GetProcessKoidRequestView request,
                       GetProcessKoidCompleter::Sync& completer) override;
 
+  // Extracts the default_dispatcher_opts from |program| and converts it to
+  // the options value expected by |fdf::Dispatcher::Create|.
+  // Returns zero if no options were specified.
+  uint32_t ExtractDefaultDispatcherOpts(const fuchsia_data::wire::Dictionary& program);
+
   async::Loop& loop_;
   std::mutex mutex_;
   fbl::DoublyLinkedList<fbl::RefPtr<Driver>> drivers_ __TA_GUARDED(mutex_);
diff --git a/src/devices/lib/dev-operation/BUILD.gn b/src/devices/lib/dev-operation/BUILD.gn
index c76a2c4..dc6f95e 100644
--- a/src/devices/lib/dev-operation/BUILD.gn
+++ b/src/devices/lib/dev-operation/BUILD.gn
@@ -15,7 +15,7 @@
 zx_library("dev-operation") {
   sdk = "source"
   sdk_publishable = "experimental"
-  sdk_name = "dev-operation-experimental-driver-only"
+  sdk_name = "dev-operation"
   sdk_headers = [
     "lib/operation/operation.h",
     "lib/operation/helpers/algorithm.h",
diff --git a/src/devices/tests/v2/BUILD.gn b/src/devices/tests/v2/BUILD.gn
index f125318..d040f91 100644
--- a/src/devices/tests/v2/BUILD.gn
+++ b/src/devices/tests/v2/BUILD.gn
@@ -13,6 +13,7 @@
     "driver-runner-integration:tests",
     "dynamic_offers:tests",
     "interop:tests",
+    "runtime-dispatcher:tests",
     "services:tests",
   ]
 }
@@ -31,6 +32,8 @@
     "interop:leaf",
     "interop:root",
     "interop:v1",
+    "runtime-dispatcher:leaf",
+    "runtime-dispatcher:root",
     "services:root",
   ]
 }
diff --git a/src/devices/tests/v2/runtime-dispatcher/BUILD.gn b/src/devices/tests/v2/runtime-dispatcher/BUILD.gn
new file mode 100644
index 0000000..f0b2d6c
--- /dev/null
+++ b/src/devices/tests/v2/runtime-dispatcher/BUILD.gn
@@ -0,0 +1,116 @@
+# Copyright 2022 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/components.gni")
+import("//build/drivers.gni")
+import("//build/rust/rustc_test.gni")
+
+group("tests") {
+  testonly = true
+  deps = [
+    ":runtime-dispatcher-leaf-bind_test",
+    ":runtime-dispatcher-test",
+  ]
+}
+
+fidl("fuchsia.runtime.test") {
+  testonly = true
+  sources = [ "runtime.test.fidl" ]
+}
+
+driver_bind_rules("root-bind") {
+  disable_autobind = true
+  bind_output = "root.bindbc"
+}
+
+fuchsia_driver("root-driver") {
+  testonly = true
+  output_name = "runtime-dispatcher-root"
+  sources = [ "root-driver.cc" ]
+  deps = [
+    ":fuchsia.runtime.test_llcpp",
+    ":root-bind",
+    "//sdk/fidl/fuchsia.component.decl:fuchsia.component.decl_llcpp",
+    "//sdk/lib/driver2:llcpp",
+    "//sdk/lib/sys/component/llcpp",
+    "//src/devices/bind/fuchsia.test:bind.fuchsia.test",
+    "//src/devices/lib/driver:driver_runtime",
+    "//zircon/system/ulib/service:service-llcpp",
+  ]
+}
+
+fuchsia_driver_component("root") {
+  testonly = true
+  manifest = "meta/root.cml"
+  deps = [ ":root-driver" ]
+  info = "root-info.json"
+}
+
+driver_bind_rules("runtime-dispatcher-leaf-bind") {
+  rules = "leaf.bind"
+  bind_output = "leaf.bindbc"
+  tests = "leaf.bind-tests.json"
+  deps = [ "//src/devices/bind/fuchsia.test" ]
+}
+
+fuchsia_driver("leaf-driver") {
+  testonly = true
+  output_name = "runtime-dispatcher-leaf"
+  sources = [ "leaf-driver.cc" ]
+  deps = [
+    ":fuchsia.runtime.test_llcpp",
+    ":runtime-dispatcher-leaf-bind",
+    "//sdk/lib/driver2:llcpp",
+    "//src/devices/lib/driver:driver_runtime",
+    "//zircon/system/ulib/async:async-cpp",
+  ]
+}
+
+fuchsia_driver_component("leaf") {
+  testonly = true
+  manifest = "meta/leaf.cml"
+  deps = [ ":leaf-driver" ]
+  info = "leaf-info.json"
+}
+
+rustc_test("bin") {
+  testonly = true
+  output_name = "runtime-dispatcher-bin"
+  source_root = "test.rs"
+  sources = [ "test.rs" ]
+  deps = [
+    ":fuchsia.runtime.test-rustc",
+    "//sdk/fidl/fuchsia.driver.test:fuchsia.driver.test-rustc",
+    "//sdk/lib/driver_test_realm/realm_builder/rust",
+    "//src/lib/fidl/rust/fidl",
+    "//src/lib/fuchsia-async",
+    "//src/lib/fuchsia-component",
+    "//src/lib/fuchsia-component-test",
+    "//third_party/rust_crates:anyhow",
+    "//third_party/rust_crates:futures",
+  ]
+}
+
+cmc_merge("realm.cml") {
+  sources = [
+    "//sdk/lib/driver_test_realm/meta/driver_test_realm.cml",
+    "meta/realm.shard.cml",
+  ]
+}
+
+fuchsia_test_component("realm") {
+  manifest = "${target_out_dir}/realm.cml"
+  restricted_features = [ "dynamic_offers" ]
+  manifest_deps = [ ":realm.cml" ]
+}
+
+fuchsia_unittest_package("runtime-dispatcher-test") {
+  deps = [
+    ":bin",
+    ":leaf",
+    ":realm",
+    ":root",
+    "//src/devices/bin/driver_host2:component",
+  ]
+}
diff --git a/src/devices/tests/v2/runtime-dispatcher/leaf-driver.cc b/src/devices/tests/v2/runtime-dispatcher/leaf-driver.cc
new file mode 100644
index 0000000..bb947e1
--- /dev/null
+++ b/src/devices/tests/v2/runtime-dispatcher/leaf-driver.cc
@@ -0,0 +1,103 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <fidl/fuchsia.driver.framework/cpp/wire.h>
+#include <fidl/fuchsia.runtime.test/cpp/wire.h>
+#include <lib/async/cpp/executor.h>
+#include <lib/driver2/logger.h>
+#include <lib/driver2/namespace.h>
+#include <lib/driver2/promise.h>
+#include <lib/driver2/record_cpp.h>
+#include <lib/fpromise/bridge.h>
+#include <lib/fpromise/scope.h>
+
+namespace fdf {
+using namespace fuchsia_driver_framework;
+}  // namespace fdf
+
+namespace ft = fuchsia_runtime_test;
+
+using fpromise::error;
+using fpromise::ok;
+using fpromise::promise;
+using fpromise::result;
+
+namespace {
+
+class LeafDriver {
+ public:
+  LeafDriver(fdf::UnownedDispatcher dispatcher, fidl::WireSharedClient<fdf::Node> node,
+             driver::Namespace ns, driver::Logger logger)
+      : dispatcher_(dispatcher),
+        executor_(dispatcher->async_dispatcher()),
+        node_(std::move(node)),
+        ns_(std::move(ns)),
+        logger_(std::move(logger)) {}
+
+  static constexpr const char* Name() { return "leaf"; }
+
+  static zx::status<std::unique_ptr<LeafDriver>> Start(fdf::wire::DriverStartArgs& start_args,
+                                                       fdf::UnownedDispatcher dispatcher,
+                                                       fidl::WireSharedClient<fdf::Node> node,
+                                                       driver::Namespace ns,
+                                                       driver::Logger logger) {
+    auto driver = std::make_unique<LeafDriver>(std::move(dispatcher), std::move(node),
+                                               std::move(ns), std::move(logger));
+
+    driver->Run();
+    return zx::ok(std::move(driver));
+  }
+
+ private:
+  void Run() {
+    // Test we can block on the dispatcher thread.
+    ZX_ASSERT(ZX_OK == DoHandshakeSynchronously());
+
+    auto task = driver::Connect<ft::Waiter>(ns_, dispatcher_->async_dispatcher())
+                    .and_then(fit::bind_member(this, &LeafDriver::CallAck))
+                    .or_else(fit::bind_member(this, &LeafDriver::UnbindNode))
+                    .wrap_with(scope_);
+    executor_.schedule_task(std::move(task));
+  }
+
+  zx_status_t DoHandshakeSynchronously() {
+    ZX_ASSERT((*dispatcher_->options() & FDF_DISPATCHER_OPTION_ALLOW_SYNC_CALLS) ==
+              FDF_DISPATCHER_OPTION_ALLOW_SYNC_CALLS);
+
+    std::string_view path = fidl::DiscoverableProtocolDefaultPath<ft::Handshake>;
+    fuchsia_io::wire::OpenFlags flags = fuchsia_io::wire::OpenFlags::kRightReadable;
+
+    auto result = ns_.Connect<ft::Handshake>(path, flags);
+    if (result.is_error()) {
+      return result.status_value();
+    }
+    fidl::WireSyncClient<ft::Handshake> client(std::move(*result));
+    return client->Do().status();
+  }
+
+  result<void, zx_status_t> CallAck(const fidl::WireSharedClient<ft::Waiter>& waiter) {
+    __UNUSED auto result = waiter->Ack();
+    return ok();
+  }
+
+  result<> UnbindNode(const zx_status_t& status) {
+    FDF_LOG(ERROR, "Failed to start leaf driver: %s", zx_status_get_string(status));
+    node_.AsyncTeardown();
+    return ok();
+  }
+
+  fdf::UnownedDispatcher const dispatcher_;
+  async::Executor executor_;
+
+  fidl::WireSharedClient<fdf::Node> node_;
+  driver::Namespace ns_;
+  driver::Logger logger_;
+
+  // NOTE: Must be the last member.
+  fpromise::scope scope_;
+};
+
+}  // namespace
+
+FUCHSIA_DRIVER_RECORD_CPP_V1(LeafDriver);
diff --git a/src/devices/tests/v2/runtime-dispatcher/leaf-info.json b/src/devices/tests/v2/runtime-dispatcher/leaf-info.json
new file mode 100644
index 0000000..61df319
--- /dev/null
+++ b/src/devices/tests/v2/runtime-dispatcher/leaf-info.json
@@ -0,0 +1,10 @@
+{
+    "short_description": "Driver Framework runtime dispatcher integration test leaf driver",
+    "manufacturer": "",
+    "families": [],
+    "models": [],
+    "areas": [
+        "DriverFramework",
+        "Test"
+    ]
+}
diff --git a/src/devices/tests/v2/runtime-dispatcher/leaf.bind b/src/devices/tests/v2/runtime-dispatcher/leaf.bind
new file mode 100644
index 0000000..2861c1d
--- /dev/null
+++ b/src/devices/tests/v2/runtime-dispatcher/leaf.bind
@@ -0,0 +1,7 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+using fuchsia.test;
+
+fuchsia.BIND_PROTOCOL == fuchsia.test.BIND_PROTOCOL.DEVICE;
diff --git a/src/devices/tests/v2/runtime-dispatcher/leaf.bind-tests.json b/src/devices/tests/v2/runtime-dispatcher/leaf.bind-tests.json
new file mode 100644
index 0000000..9606f72
--- /dev/null
+++ b/src/devices/tests/v2/runtime-dispatcher/leaf.bind-tests.json
@@ -0,0 +1,16 @@
+[
+    {
+        "device": {
+            "fuchsia.BIND_PROTOCOL": "fuchsia.unknown.BIND_PROTOCOL.DEVICE"
+        },
+        "expected": "abort",
+        "name": "Mismatched protocol"
+    },
+    {
+        "device": {
+            "fuchsia.BIND_PROTOCOL": "fuchsia.test.BIND_PROTOCOL.DEVICE"
+        },
+        "expected": "match",
+        "name": "Matched protocol"
+    }
+]
diff --git a/src/devices/tests/v2/runtime-dispatcher/meta/leaf.cml b/src/devices/tests/v2/runtime-dispatcher/meta/leaf.cml
new file mode 100644
index 0000000..8e237b2
--- /dev/null
+++ b/src/devices/tests/v2/runtime-dispatcher/meta/leaf.cml
@@ -0,0 +1,24 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+{
+    include: [
+        "inspect/client.shard.cml",
+        "syslog/client.shard.cml",
+    ],
+    program: {
+        runner: "driver",
+        binary: "driver/runtime-dispatcher-leaf.so",
+        bind: "meta/bind/leaf.bindbc",
+        colocate: "true",
+        default_dispatcher_opts: [ "allow_sync_calls" ],
+    },
+    use: [
+        {
+            protocol: [
+                "fuchsia.runtime.test.Handshake",
+                "fuchsia.runtime.test.Waiter",
+            ],
+        },
+    ],
+}
diff --git a/src/devices/tests/v2/runtime-dispatcher/meta/realm.shard.cml b/src/devices/tests/v2/runtime-dispatcher/meta/realm.shard.cml
new file mode 100644
index 0000000..7fcf250
--- /dev/null
+++ b/src/devices/tests/v2/runtime-dispatcher/meta/realm.shard.cml
@@ -0,0 +1,12 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+{
+    offer: [
+        {
+            protocol: "fuchsia.runtime.test.Waiter",
+            from: "parent",
+            to: "#boot-drivers",
+        },
+    ],
+}
diff --git a/src/devices/tests/v2/runtime-dispatcher/meta/root.cml b/src/devices/tests/v2/runtime-dispatcher/meta/root.cml
new file mode 100644
index 0000000..1911f4a
--- /dev/null
+++ b/src/devices/tests/v2/runtime-dispatcher/meta/root.cml
@@ -0,0 +1,23 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+{
+    include: [
+        "inspect/client.shard.cml",
+        "syslog/client.shard.cml",
+    ],
+    program: {
+        runner: "driver",
+        binary: "driver/runtime-dispatcher-root.so",
+        bind: "meta/bind/root.bindbc",
+    },
+    capabilities: [
+        { protocol: "fuchsia.runtime.test.Handshake" },
+    ],
+    expose: [
+        {
+            protocol: "fuchsia.runtime.test.Handshake",
+            from: "self",
+        },
+    ],
+}
diff --git a/src/devices/tests/v2/runtime-dispatcher/root-driver.cc b/src/devices/tests/v2/runtime-dispatcher/root-driver.cc
new file mode 100644
index 0000000..7fccfec
--- /dev/null
+++ b/src/devices/tests/v2/runtime-dispatcher/root-driver.cc
@@ -0,0 +1,148 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <fidl/fuchsia.component.decl/cpp/wire.h>
+#include <fidl/fuchsia.driver.framework/cpp/wire.h>
+#include <fidl/fuchsia.runtime.test/cpp/wire.h>
+#include <lib/async/cpp/executor.h>
+#include <lib/driver2/logger.h>
+#include <lib/driver2/namespace.h>
+#include <lib/driver2/promise.h>
+#include <lib/driver2/record_cpp.h>
+#include <lib/fpromise/bridge.h>
+#include <lib/fpromise/scope.h>
+#include <lib/sys/component/llcpp/outgoing_directory.h>
+
+#include <bind/fuchsia/test/cpp/fidl.h>
+
+namespace fdf {
+using namespace fuchsia_driver_framework;
+}  // namespace fdf
+
+namespace fcd = fuchsia_component_decl;
+namespace fio = fuchsia_io;
+namespace ft = fuchsia_runtime_test;
+
+using fpromise::error;
+using fpromise::ok;
+using fpromise::promise;
+using fpromise::result;
+
+namespace {
+
+class RootDriver : public fidl::WireServer<ft::Handshake> {
+ public:
+  RootDriver(async_dispatcher_t* dispatcher, fidl::WireSharedClient<fdf::Node> node,
+             driver::Namespace ns, driver::Logger logger)
+      : dispatcher_(dispatcher),
+        executor_(dispatcher),
+        outgoing_(component::OutgoingDirectory::Create(dispatcher)),
+        node_(std::move(node)),
+        ns_(std::move(ns)),
+        logger_(std::move(logger)) {}
+
+  static constexpr const char* Name() { return "root"; }
+
+  static zx::status<std::unique_ptr<RootDriver>> Start(fdf::wire::DriverStartArgs& start_args,
+                                                       fdf::UnownedDispatcher dispatcher,
+                                                       fidl::WireSharedClient<fdf::Node> node,
+                                                       driver::Namespace ns,
+                                                       driver::Logger logger) {
+    auto driver = std::make_unique<RootDriver>(dispatcher->async_dispatcher(), std::move(node),
+                                               std::move(ns), std::move(logger));
+    auto result = driver->Run(std::move(start_args.outgoing_dir()));
+    if (result.is_error()) {
+      return result.take_error();
+    }
+    return zx::ok(std::move(driver));
+  }
+
+ private:
+  zx::status<> Run(fidl::ServerEnd<fio::Directory> outgoing_dir) {
+    // Setup the outgoing directory.
+    auto service = [this](fidl::ServerEnd<ft::Handshake> server_end) {
+      fidl::BindServer(dispatcher_, std::move(server_end), this);
+    };
+    zx::status<> status = outgoing_.AddProtocol<ft::Handshake>(std::move(service));
+    if (status.is_error()) {
+      return status;
+    }
+
+    auto serve = outgoing_.Serve(std::move(outgoing_dir));
+    if (serve.is_error()) {
+      return serve.take_error();
+    }
+
+    // Start the driver.
+    auto task =
+        AddChild().or_else(fit::bind_member(this, &RootDriver::UnbindNode)).wrap_with(scope_);
+    executor_.schedule_task(std::move(task));
+    return zx::ok();
+  }
+
+  promise<void, fdf::wire::NodeError> AddChild() {
+    fidl::Arena arena;
+
+    // Offer `fuchsia.test.Handshake` to the driver that binds to the node.
+    auto protocol =
+        fcd::wire::OfferProtocol::Builder(arena)
+            .source_name(
+                fidl::StringView::FromExternal(fidl::DiscoverableProtocolName<ft::Handshake>))
+            .target_name(
+                fidl::StringView::FromExternal(fidl::DiscoverableProtocolName<ft::Handshake>))
+            .dependency_type(fcd::wire::DependencyType::kStrong)
+            .Build();
+    fcd::wire::Offer offer = fcd::wire::Offer::WithProtocol(arena, std::move(protocol));
+
+    // Set the properties of the node that a driver will bind to.
+    auto property = fdf::wire::NodeProperty::Builder(arena)
+                        .key(fdf::wire::NodePropertyKey::WithIntValue(1 /* BIND_PROTOCOL */))
+                        .value(fdf::wire::NodePropertyValue::WithIntValue(
+                            bind::fuchsia::test::BIND_PROTOCOL_DEVICE))
+                        .Build();
+
+    auto args =
+        fdf::wire::NodeAddArgs::Builder(arena)
+            .name("leaf")
+            .properties(fidl::VectorView<fdf::wire::NodeProperty>::FromExternal(&property, 1))
+            .offers(fidl::VectorView<fcd::wire::Offer>::FromExternal(&offer, 1))
+            .Build();
+
+    // Create endpoints of the `NodeController` for the node.
+    auto endpoints = fidl::CreateEndpoints<fdf::NodeController>();
+    if (endpoints.is_error()) {
+      return fpromise::make_error_promise(fdf::wire::NodeError::kInternal);
+    }
+
+    return driver::AddChild(node_, std::move(args), std::move(endpoints->server), {})
+        .and_then([this, client = std::move(endpoints->client)]() mutable {
+          controller_.Bind(std::move(client), dispatcher_);
+        });
+  }
+
+  result<> UnbindNode(const fdf::wire::NodeError& error) {
+    FDF_LOG(ERROR, "Failed to start root driver: %d", error);
+    node_.AsyncTeardown();
+    return ok();
+  }
+
+  // fidl::WireServer<ft::Handshake>
+  void Do(DoRequestView request, DoCompleter::Sync& completer) override { completer.Reply(); }
+
+  async_dispatcher_t* const dispatcher_;
+  async::Executor executor_;
+  component::OutgoingDirectory outgoing_;
+
+  fidl::WireSharedClient<fdf::Node> node_;
+  fidl::WireSharedClient<fdf::NodeController> controller_;
+  driver::Namespace ns_;
+  driver::Logger logger_;
+
+  // NOTE: Must be the last member.
+  fpromise::scope scope_;
+};
+
+}  // namespace
+
+FUCHSIA_DRIVER_RECORD_CPP_V1(RootDriver);
diff --git a/src/devices/tests/v2/runtime-dispatcher/root-info.json b/src/devices/tests/v2/runtime-dispatcher/root-info.json
new file mode 100644
index 0000000..081d167
--- /dev/null
+++ b/src/devices/tests/v2/runtime-dispatcher/root-info.json
@@ -0,0 +1,10 @@
+{
+    "short_description": "Driver Framework runtime dispatcher integration test root driver",
+    "manufacturer": "",
+    "families": [],
+    "models": [],
+    "areas": [
+        "DriverFramework",
+        "Test"
+    ]
+}
diff --git a/src/devices/tests/v2/runtime-dispatcher/runtime.test.fidl b/src/devices/tests/v2/runtime-dispatcher/runtime.test.fidl
new file mode 100644
index 0000000..936811e
--- /dev/null
+++ b/src/devices/tests/v2/runtime-dispatcher/runtime.test.fidl
@@ -0,0 +1,15 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+library fuchsia.runtime.test;
+
+@discoverable
+protocol Handshake {
+    Do() -> ();
+};
+
+@discoverable
+protocol Waiter {
+    Ack();
+};
diff --git a/src/devices/tests/v2/runtime-dispatcher/test.rs b/src/devices/tests/v2/runtime-dispatcher/test.rs
new file mode 100644
index 0000000..455a429
--- /dev/null
+++ b/src/devices/tests/v2/runtime-dispatcher/test.rs
@@ -0,0 +1,74 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use {
+    anyhow::{anyhow, Error, Result},
+    fidl_fuchsia_driver_test as fdt, fidl_fuchsia_runtime_test as ft, fuchsia_async as fasync,
+    fuchsia_async::futures::{StreamExt, TryStreamExt},
+    fuchsia_component::server::ServiceFs,
+    fuchsia_component_test::{
+        Capability, ChildOptions, LocalComponentHandles, RealmBuilder, Ref, Route,
+    },
+    fuchsia_driver_test::{DriverTestRealmBuilder, DriverTestRealmInstance},
+    futures::channel::mpsc,
+};
+
+const WAITER_NAME: &'static str = "waiter";
+
+async fn waiter_serve(mut stream: ft::WaiterRequestStream, mut sender: mpsc::Sender<()>) {
+    while let Some(ft::WaiterRequest::Ack { .. }) = stream.try_next().await.expect("Stream failed")
+    {
+        sender.try_send(()).expect("Sender failed")
+    }
+}
+
+async fn waiter_component(
+    handles: LocalComponentHandles,
+    sender: mpsc::Sender<()>,
+) -> Result<(), Error> {
+    let mut fs = ServiceFs::new();
+    fs.dir("svc").add_fidl_service(move |stream: ft::WaiterRequestStream| {
+        fasync::Task::spawn(waiter_serve(stream, sender.clone())).detach()
+    });
+    fs.serve_connection(handles.outgoing_dir.into_channel())?;
+    Ok(fs.collect::<()>().await)
+}
+
+#[fasync::run_singlethreaded(test)]
+async fn test_runtime_dispatcher() -> Result<()> {
+    let (sender, mut receiver) = mpsc::channel(1);
+
+    // Create the RealmBuilder.
+    let builder = RealmBuilder::new().await?;
+    builder.driver_test_realm_manifest_setup("#meta/realm.cm").await?;
+    let waiter = builder
+        .add_local_child(
+            WAITER_NAME,
+            move |handles: LocalComponentHandles| {
+                Box::pin(waiter_component(handles, sender.clone()))
+            },
+            ChildOptions::new(),
+        )
+        .await?;
+    builder
+        .add_route(
+            Route::new()
+                .capability(Capability::protocol::<ft::WaiterMarker>())
+                .from(&waiter)
+                .to(Ref::child(fuchsia_driver_test::COMPONENT_NAME)),
+        )
+        .await?;
+    // Build the Realm.
+    let instance = builder.build().await?;
+    // Start the DriverTestRealm.
+    let args = fdt::RealmArgs {
+        root_driver: Some("#meta/root.cm".to_string()),
+        use_driver_framework_v2: Some(true),
+        ..fdt::RealmArgs::EMPTY
+    };
+    instance.driver_test_realm_start(args).await?;
+
+    // Wait for the driver to call Waiter.Done.
+    receiver.next().await.ok_or(anyhow!("Receiver failed"))
+}
diff --git a/src/devices/usb/lib/usb/BUILD.gn b/src/devices/usb/lib/usb/BUILD.gn
index 52ee482..65713ea 100644
--- a/src/devices/usb/lib/usb/BUILD.gn
+++ b/src/devices/usb/lib/usb/BUILD.gn
@@ -12,7 +12,7 @@
 zx_library("usb") {
   sdk = "source"
   sdk_publishable = "experimental"
-  sdk_name = "usb-experimental-driver-only"
+  sdk_name = "usb"
   sdk_headers = [
     "usb/request-cpp.h",
     "usb/usb-request.h",
diff --git a/src/graphics/lib/compute/forma/src/buffer.rs b/src/graphics/lib/compute/forma/src/buffer.rs
index bf2ef12..10b355d 100644
--- a/src/graphics/lib/compute/forma/src/buffer.rs
+++ b/src/graphics/lib/compute/forma/src/buffer.rs
@@ -8,7 +8,7 @@
 };
 
 pub use surpass::layout;
-use surpass::painter::Color;
+use surpass::painter::{CachedTile, Color};
 
 use layout::{Flusher, Layout};
 
@@ -98,9 +98,7 @@
 #[derive(Debug)]
 pub struct CacheInner {
     pub clear_color: Option<Color>,
-    // Not `Option<Vec<u32>>` because the vector is split and sent to different
-    // threads.
-    pub layers: Vec<Option<u32>>,
+    pub tiles: Vec<CachedTile>,
     pub width: Option<usize>,
     pub height: Option<usize>,
     _id_dropper: IdDropper,
@@ -163,7 +161,7 @@
             id,
             cache: Rc::new(RefCell::new(CacheInner {
                 clear_color: None,
-                layers: Vec::new(),
+                tiles: Vec::new(),
                 width: None,
                 height: None,
                 _id_dropper: IdDropper { id, buffers_with_caches },
@@ -176,7 +174,7 @@
         let mut cache = self.cache.borrow_mut();
 
         cache.clear_color = None;
-        cache.layers.fill(None);
+        cache.tiles.fill(CachedTile::default());
     }
 }
 
diff --git a/src/graphics/lib/compute/forma/src/renderer/cpu.rs b/src/graphics/lib/compute/forma/src/renderer/cpu.rs
index 3b1b387..0fd4888 100644
--- a/src/graphics/lib/compute/forma/src/renderer/cpu.rs
+++ b/src/graphics/lib/compute/forma/src/renderer/cpu.rs
@@ -11,7 +11,7 @@
 use rustc_hash::FxHashMap;
 use surpass::{
     layout::Layout,
-    painter::{self, Channel, Color, LayerProps, Props, Rect},
+    painter::{self, CachedTile, Channel, Color, LayerProps, Props, Rect},
     rasterizer::Rasterizer,
     Order, TILE_HEIGHT, TILE_WIDTH,
 };
@@ -66,7 +66,7 @@
             let tiles_len = buffer.layout.width_in_tiles() * buffer.layout.height_in_tiles();
             let cache = &layer_cache.cache;
 
-            cache.borrow_mut().layers.resize(tiles_len, None);
+            cache.borrow_mut().tiles.resize(tiles_len, CachedTile::default());
 
             if cache.borrow().width != Some(buffer.layout.width())
                 || cache.borrow().height != Some(buffer.layout.height())
@@ -155,8 +155,8 @@
                 .as_ref()
                 .and_then(|layer_cache| layer_cache.cache.borrow().clear_color);
 
-            let layers_per_tile = buffer.layer_cache.as_ref().map(|layer_cache| {
-                RefMut::map(layer_cache.cache.borrow_mut(), |cache| &mut cache.layers)
+            let cached_tiles = buffer.layer_cache.as_ref().map(|layer_cache| {
+                RefMut::map(layer_cache.cache.borrow_mut(), |cache| &mut cache.tiles)
             });
 
             {
@@ -167,7 +167,7 @@
                     channels,
                     buffer.flusher.as_deref(),
                     previous_clear_color,
-                    layers_per_tile,
+                    cached_tiles,
                     rasterizer.segments(),
                     clear_color,
                     &crop,
diff --git a/src/graphics/lib/compute/surpass/src/painter/layer_workbench/mod.rs b/src/graphics/lib/compute/surpass/src/painter/layer_workbench/mod.rs
index a1c812b..01db9ff 100755
--- a/src/graphics/lib/compute/surpass/src/painter/layer_workbench/mod.rs
+++ b/src/graphics/lib/compute/surpass/src/painter/layer_workbench/mod.rs
@@ -12,8 +12,8 @@
 
 use crate::{
     painter::{
-        layer_workbench::passes::PassesSharedState, Color, Cover, CoverCarry, FillRule, Func,
-        LayerProps, Props, Style,
+        layer_workbench::passes::PassesSharedState, CachedTile, Channel, Color, Cover, CoverCarry,
+        FillRule, Func, LayerProps, Props, Style,
     },
     rasterizer::{self, PixelSegment},
     TILE_HEIGHT, TILE_WIDTH,
@@ -104,9 +104,15 @@
 }
 
 #[derive(Debug, PartialEq)]
-pub enum TileWriteOp {
+pub enum OptimizerTileWriteOp {
     None,
     Solid(Color),
+}
+
+#[derive(Debug, PartialEq)]
+pub enum TileWriteOp {
+    None,
+    Solid([u8; 4]),
     ColorBuffer,
 }
 
@@ -115,8 +121,9 @@
     pub tile_y: usize,
     pub segments: &'c [PixelSegment<TILE_WIDTH, TILE_HEIGHT>],
     pub props: &'c P,
-    pub previous_clear_color: Option<Color>,
-    pub previous_layers: Cell<Option<&'c mut Option<u32>>>,
+    pub cached_clear_color: Option<Color>,
+    pub channels: [Channel; 4],
+    pub cached_tile: Option<&'c CachedTile>,
     pub clear_color: Color,
 }
 
@@ -205,7 +212,7 @@
     fn optimization_passes<'c, P: LayerProps>(
         &mut self,
         context: &'c Context<'_, P>,
-    ) -> ControlFlow<TileWriteOp> {
+    ) -> ControlFlow<OptimizerTileWriteOp> {
         let state = &mut self.state;
         let passes_shared_state = &mut self.passes_shared_state;
 
@@ -250,7 +257,9 @@
     ) -> TileWriteOp {
         self.populate_layers(context);
 
-        if let ControlFlow::Break(tile_op) = self.optimization_passes(context) {
+        if let ControlFlow::Break(tile_op) =
+            CachedTile::convert_optimizer_op(self.optimization_passes(context), context)
+        {
             for &id in self.state.ids.iter() {
                 if let Some(cover_carry) = self.cover_carry(context, id) {
                     self.state.next_queue.push(cover_carry);
@@ -319,14 +328,17 @@
     use std::borrow::Cow;
 
     use crate::{
-        painter::{layer_workbench::passes, style::Color, BlendMode, Fill, Props},
+        painter::{layer_workbench::passes, style::Color, BlendMode, Fill, Props, RGBA},
         simd::{i8x16, Simd},
         PIXEL_WIDTH, TILE_HEIGHT,
     };
 
-    const WHITE: Color = Color { r: 1.0, g: 1.0, b: 1.0, a: 1.0 };
-    const BLACK: Color = Color { r: 0.0, g: 0.0, b: 0.0, a: 0.0 };
-    const RED: Color = Color { r: 1.0, g: 0.0, b: 0.0, a: 1.0 };
+    const WHITEF: Color = Color { r: 1.0, g: 1.0, b: 1.0, a: 1.0 };
+    const BLACKF: Color = Color { r: 0.0, g: 0.0, b: 0.0, a: 0.0 };
+    const REDF: Color = Color { r: 1.0, g: 0.0, b: 0.0, a: 1.0 };
+
+    const RED: [u8; 4] = [255, 0, 0, 255];
+    const WHITE: [u8; 4] = [255, 255, 255, 255];
 
     impl<T: PartialEq, const N: usize> PartialEq<[T; N]> for MaskedVec<T> {
         fn eq(&self, other: &[T; N]) -> bool {
@@ -334,6 +346,37 @@
         }
     }
 
+    struct UnimplementedPainter;
+
+    impl LayerPainter for UnimplementedPainter {
+        fn clear_cells(&mut self) {
+            unimplemented!();
+        }
+
+        fn acc_segment(&mut self, _segment: PixelSegment<TILE_WIDTH, TILE_HEIGHT>) {
+            unimplemented!();
+        }
+
+        fn acc_cover(&mut self, _cover: Cover) {
+            unimplemented!();
+        }
+
+        fn clear(&mut self, _color: Color) {
+            unimplemented!();
+        }
+
+        fn paint_layer(
+            &mut self,
+            _tile_x: usize,
+            _tile_y: usize,
+            _layer_id: u32,
+            _props: &Props,
+            _apply_clip: bool,
+        ) -> Cover {
+            unimplemented!()
+        }
+    }
+
     #[test]
     fn masked_vec() {
         let mut v = MaskedVec::default();
@@ -424,9 +467,10 @@
                 segment(5),
             ],
             props: &UnimplementedProps,
-            previous_clear_color: Some(BLACK),
-            previous_layers: Cell::default(),
-            clear_color: BLACK,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: None,
+            channels: RGBA,
+            clear_color: BLACKF,
         };
 
         workbench.populate_layers(&context);
@@ -469,16 +513,18 @@
             }
         }
 
-        let mut layers = Some(4);
+        let mut cached_tiles = CachedTile::default();
+        cached_tiles.update_layer_count(Some(4));
 
         let context = Context {
             tile_x: 0,
             tile_y: 0,
             segments: &[segment(0), segment(1), segment(2), segment(3), segment(4)],
             props: &TestProps,
-            previous_clear_color: Some(BLACK),
-            previous_layers: Cell::new(Some(&mut layers)),
-            clear_color: BLACK,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: Some(&mut cached_tiles),
+            channels: RGBA,
+            clear_color: BLACKF,
         };
 
         workbench.populate_layers(&context);
@@ -490,18 +536,19 @@
                 &mut workbench.passes_shared_state,
                 &context
             ),
-            ControlFlow::Continue(())
+            ControlFlow::Continue(()),
         );
-        assert_eq!(layers, Some(5));
+        assert_eq!(cached_tiles.layer_count(), Some(5));
 
         let context = Context {
             tile_x: 0,
             tile_y: 0,
             segments: &[segment(0), segment(1), segment(2), segment(3), segment(4)],
             props: &TestProps,
-            previous_clear_color: Some(BLACK),
-            previous_layers: Cell::new(Some(&mut layers)),
-            clear_color: BLACK,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: Some(&mut cached_tiles),
+            channels: RGBA,
+            clear_color: BLACKF,
         };
 
         // Skip should occur because the previous pass updated the number of layers.
@@ -511,18 +558,19 @@
                 &mut workbench.passes_shared_state,
                 &context
             ),
-            ControlFlow::Break(TileWriteOp::None)
+            ControlFlow::Break(OptimizerTileWriteOp::None),
         );
-        assert_eq!(layers, Some(5));
+        assert_eq!(cached_tiles.layer_count(), Some(5));
 
         let context = Context {
             tile_x: 0,
             tile_y: 0,
             segments: &[segment(1), segment(2), segment(3), segment(4), segment(5)],
             props: &TestProps,
-            previous_clear_color: Some(BLACK),
-            previous_layers: Cell::new(Some(&mut layers)),
-            clear_color: BLACK,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: Some(&mut cached_tiles),
+            channels: RGBA,
+            clear_color: BLACKF,
         };
 
         workbench.next_tile();
@@ -535,18 +583,19 @@
                 &mut workbench.passes_shared_state,
                 &context
             ),
-            ControlFlow::Continue(())
+            ControlFlow::Continue(()),
         );
-        assert_eq!(layers, Some(5));
+        assert_eq!(cached_tiles.layer_count(), Some(5));
 
         let context = Context {
             tile_x: 0,
             tile_y: 0,
             segments: &[segment(0), segment(1), segment(2), segment(3), segment(4)],
             props: &TestProps,
-            previous_clear_color: Some(BLACK),
-            previous_layers: Cell::new(Some(&mut layers)),
-            clear_color: WHITE,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: Some(&mut cached_tiles),
+            channels: RGBA,
+            clear_color: WHITEF,
         };
 
         workbench.next_tile();
@@ -559,9 +608,9 @@
                 &mut workbench.passes_shared_state,
                 &context
             ),
-            ControlFlow::Continue(())
+            ControlFlow::Continue(()),
         );
-        assert_eq!(layers, Some(5));
+        assert_eq!(cached_tiles.layer_count(), Some(5));
     }
 
     #[test]
@@ -598,9 +647,10 @@
             tile_y: 0,
             segments: &[],
             props: &TestProps,
-            previous_clear_color: Some(BLACK),
-            previous_layers: Cell::default(),
-            clear_color: BLACK,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: None,
+            channels: RGBA,
+            clear_color: BLACKF,
         };
 
         workbench.populate_layers(&context);
@@ -644,9 +694,10 @@
             tile_y: 0,
             segments: &[],
             props: &TestProps,
-            previous_clear_color: Some(BLACK),
-            previous_layers: Cell::default(),
-            clear_color: BLACK,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: None,
+            channels: RGBA,
+            clear_color: BLACKF,
         };
 
         workbench.populate_layers(&context);
@@ -691,9 +742,10 @@
             tile_y: 0,
             segments: &[],
             props: &TestProps,
-            previous_clear_color: Some(BLACK),
-            previous_layers: Cell::default(),
-            clear_color: BLACK,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: None,
+            channels: RGBA,
+            clear_color: BLACKF,
         };
 
         workbench.populate_layers(&context);
@@ -734,9 +786,10 @@
             tile_y: 0,
             segments: &[segment(3)],
             props: &TestProps,
-            previous_clear_color: Some(BLACK),
-            previous_layers: Cell::default(),
-            clear_color: BLACK,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: None,
+            channels: RGBA,
+            clear_color: BLACKF,
         };
 
         workbench.populate_layers(&context);
@@ -747,7 +800,7 @@
                 &mut workbench.passes_shared_state,
                 &context,
             ),
-            ControlFlow::Continue(())
+            ControlFlow::Continue(()),
         );
 
         assert_eq!(workbench.state.ids, [2, 3]);
@@ -787,9 +840,10 @@
             tile_y: 0,
             segments: &[],
             props: &TestProps,
-            previous_clear_color: Some(BLACK),
-            previous_layers: Cell::default(),
-            clear_color: BLACK,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: None,
+            channels: RGBA,
+            clear_color: BLACKF,
         };
 
         workbench.populate_layers(&context);
@@ -800,12 +854,12 @@
                 &mut workbench.passes_shared_state,
                 &context,
             ),
-            ControlFlow::Break(TileWriteOp::Solid(Color {
+            ControlFlow::Break(OptimizerTileWriteOp::Solid(Color {
                 r: 0.28125,
                 g: 0.28125,
                 b: 0.28125,
-                a: 0.75,
-            }))
+                a: 0.75
+            })),
         );
     }
 
@@ -839,9 +893,10 @@
             tile_y: 0,
             segments: &[],
             props: &TestProps,
-            previous_clear_color: Some(WHITE),
-            previous_layers: Cell::default(),
-            clear_color: WHITE,
+            cached_clear_color: Some(WHITEF),
+            cached_tile: None,
+            channels: RGBA,
+            clear_color: WHITEF,
         };
 
         workbench.populate_layers(&context);
@@ -852,12 +907,12 @@
                 &mut workbench.passes_shared_state,
                 &context,
             ),
-            ControlFlow::Break(TileWriteOp::Solid(Color {
+            ControlFlow::Break(OptimizerTileWriteOp::Solid(Color {
                 r: 0.5625,
                 g: 0.5625,
                 b: 0.5625,
-                a: 1.0,
-            }))
+                a: 1.0
+            })),
         );
     }
 
@@ -894,9 +949,10 @@
             tile_y: 0,
             segments: &[],
             props: &TestProps,
-            previous_clear_color: Some(WHITE),
-            previous_layers: Cell::default(),
-            clear_color: WHITE,
+            cached_clear_color: Some(WHITEF),
+            cached_tile: None,
+            channels: RGBA,
+            clear_color: WHITEF,
         };
 
         workbench.populate_layers(&context);
@@ -907,7 +963,7 @@
                 &mut workbench.passes_shared_state,
                 &context,
             ),
-            ControlFlow::Continue(())
+            ControlFlow::Continue(()),
         );
     }
 
@@ -938,37 +994,6 @@
             }
         }
 
-        struct UnimplementedPainter;
-
-        impl LayerPainter for UnimplementedPainter {
-            fn clear_cells(&mut self) {
-                unimplemented!();
-            }
-
-            fn acc_segment(&mut self, _segment: PixelSegment<TILE_WIDTH, TILE_HEIGHT>) {
-                unimplemented!();
-            }
-
-            fn acc_cover(&mut self, _cover: Cover) {
-                unimplemented!();
-            }
-
-            fn clear(&mut self, _color: Color) {
-                unimplemented!();
-            }
-
-            fn paint_layer(
-                &mut self,
-                _tile_x: usize,
-                _tile_y: usize,
-                _layer_id: u32,
-                _props: &Props,
-                _apply_clip: bool,
-            ) -> Cover {
-                unimplemented!()
-            }
-        }
-
         workbench.init([cover(0, CoverType::Partial), cover(1, CoverType::Full)]);
 
         let context = Context {
@@ -976,14 +1001,15 @@
             tile_y: 0,
             segments: &[],
             props: &TestProps,
-            previous_clear_color: Some(WHITE),
-            previous_layers: Cell::default(),
-            clear_color: WHITE,
+            cached_clear_color: Some(WHITEF),
+            cached_tile: None,
+            channels: RGBA,
+            clear_color: WHITEF,
         };
 
         assert_eq!(
             workbench.drive_tile_painting(&mut UnimplementedPainter, &context),
-            TileWriteOp::Solid(Color { r: 0.75, g: 0.75, b: 0.75, a: 1.0 })
+            TileWriteOp::Solid([224, 224, 224, 255]),
         );
     }
 
@@ -997,7 +1023,7 @@
             fn get(&self, layer_id: u32) -> Cow<'_, Props> {
                 if layer_id == 2 {
                     return Cow::Owned(Props {
-                        func: Func::Draw(Style { fill: Fill::Solid(RED), ..Default::default() }),
+                        func: Func::Draw(Style { fill: Fill::Solid(REDF), ..Default::default() }),
                         ..Default::default()
                     });
                 }
@@ -1016,16 +1042,18 @@
             cover(2, CoverType::Full),
         ]);
 
-        let mut layers = Some(3);
+        let mut cached_tiles = CachedTile::default();
+        cached_tiles.update_layer_count(Some(3));
 
         let context = Context {
             tile_x: 0,
             tile_y: 0,
             segments: &[],
             props: &TestProps,
-            previous_clear_color: Some(BLACK),
-            previous_layers: Cell::new(Some(&mut layers)),
-            clear_color: BLACK,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: Some(&mut cached_tiles),
+            channels: RGBA,
+            clear_color: BLACKF,
         };
 
         workbench.populate_layers(&context);
@@ -1037,7 +1065,7 @@
                 &mut workbench.passes_shared_state,
                 &context
             ),
-            ControlFlow::Continue(())
+            ControlFlow::Continue(()),
         );
         // However, we can still skip drawing because everything visible is unchanged.
         assert_eq!(
@@ -1046,19 +1074,20 @@
                 &mut workbench.passes_shared_state,
                 &context,
             ),
-            ControlFlow::Break(TileWriteOp::None)
+            ControlFlow::Break(OptimizerTileWriteOp::None),
         );
 
-        layers = Some(2);
+        cached_tiles.update_layer_count(Some(2));
 
         let context = Context {
             tile_x: 0,
             tile_y: 0,
             segments: &[],
             props: &TestProps,
-            previous_clear_color: Some(BLACK),
-            previous_layers: Cell::new(Some(&mut layers)),
-            clear_color: BLACK,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: Some(&mut cached_tiles),
+            channels: RGBA,
+            clear_color: BLACKF,
         };
 
         workbench.populate_layers(&context);
@@ -1070,7 +1099,7 @@
                 &mut workbench.passes_shared_state,
                 &context
             ),
-            ControlFlow::Continue(())
+            ControlFlow::Continue(()),
         );
         // We can still skip the tile because any newly added layer is covered by an opaque layer.
         assert_eq!(
@@ -1079,19 +1108,20 @@
                 &mut workbench.passes_shared_state,
                 &context,
             ),
-            ControlFlow::Break(TileWriteOp::None)
+            ControlFlow::Break(OptimizerTileWriteOp::None),
         );
 
-        layers = Some(4);
+        cached_tiles.update_layer_count(Some(4));
 
         let context = Context {
             tile_x: 0,
             tile_y: 0,
             segments: &[],
             props: &TestProps,
-            previous_clear_color: Some(BLACK),
-            previous_layers: Cell::new(Some(&mut layers)),
-            clear_color: BLACK,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: Some(&mut cached_tiles),
+            channels: RGBA,
+            clear_color: BLACKF,
         };
 
         workbench.populate_layers(&context);
@@ -1103,7 +1133,7 @@
                 &mut workbench.passes_shared_state,
                 &context
             ),
-            ControlFlow::Continue(())
+            ControlFlow::Continue(()),
         );
         // This time we cannot skip because there might have been a visible layer
         // last frame that is now removed.
@@ -1113,7 +1143,93 @@
                 &mut workbench.passes_shared_state,
                 &context,
             ),
-            ControlFlow::Break(TileWriteOp::Solid(RED))
+            ControlFlow::Break(OptimizerTileWriteOp::Solid(REDF)),
+        );
+    }
+
+    #[test]
+    fn skip_solid_color_is_unchanged() {
+        let mut workbench = LayerWorkbench::default();
+
+        struct TestProps;
+
+        impl LayerProps for TestProps {
+            fn get(&self, _layer_id: u32) -> Cow<'_, Props> {
+                Cow::Owned(Props {
+                    func: Func::Draw(Style { fill: Fill::Solid(REDF), ..Default::default() }),
+                    ..Default::default()
+                })
+            }
+
+            fn is_unchanged(&self, _layer_id: u32) -> bool {
+                false
+            }
+        }
+
+        workbench.init([cover(0, CoverType::Full)]);
+
+        let context = Context {
+            tile_x: 0,
+            tile_y: 0,
+            segments: &[],
+            props: &TestProps,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: None,
+            channels: RGBA,
+            clear_color: BLACKF,
+        };
+
+        workbench.populate_layers(&context);
+
+        // We can't skip drawing because we don't have any cached tile.
+        assert_eq!(
+            workbench.drive_tile_painting(&mut UnimplementedPainter, &context),
+            TileWriteOp::Solid(RED),
+        );
+
+        let mut cached_tiles = CachedTile::default();
+        cached_tiles.update_layer_count(Some(0));
+        cached_tiles.update_solid_color(Some(WHITE));
+
+        let context = Context {
+            tile_x: 0,
+            tile_y: 0,
+            segments: &[],
+            props: &TestProps,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: Some(&mut cached_tiles),
+            channels: RGBA,
+            clear_color: BLACKF,
+        };
+
+        workbench.populate_layers(&context);
+
+        // We can't skip drawing because the tile solid color (RED) is different from the previous one (WHITE).
+        assert_eq!(
+            workbench.drive_tile_painting(&mut UnimplementedPainter, &context),
+            TileWriteOp::Solid(RED),
+        );
+
+        cached_tiles.update_layer_count(Some(0));
+        cached_tiles.update_solid_color(Some(RED));
+
+        let context = Context {
+            tile_x: 0,
+            tile_y: 0,
+            segments: &[],
+            props: &TestProps,
+            cached_clear_color: Some(BLACKF),
+            cached_tile: Some(&mut cached_tiles),
+            channels: RGBA,
+            clear_color: BLACKF,
+        };
+
+        workbench.populate_layers(&context);
+
+        // We can skip drawing because the tile solid color is unchanged.
+        assert_eq!(
+            workbench.drive_tile_painting(&mut UnimplementedPainter, &context),
+            TileWriteOp::None,
         );
     }
 }
diff --git a/src/graphics/lib/compute/surpass/src/painter/layer_workbench/passes/skip_fully_covered_layers.rs b/src/graphics/lib/compute/surpass/src/painter/layer_workbench/passes/skip_fully_covered_layers.rs
index 7f2a978..6140089 100644
--- a/src/graphics/lib/compute/surpass/src/painter/layer_workbench/passes/skip_fully_covered_layers.rs
+++ b/src/graphics/lib/compute/surpass/src/painter/layer_workbench/passes/skip_fully_covered_layers.rs
@@ -5,7 +5,9 @@
 use std::ops::ControlFlow;
 
 use crate::painter::{
-    layer_workbench::{passes::PassesSharedState, Context, LayerWorkbenchState, TileWriteOp},
+    layer_workbench::{
+        passes::PassesSharedState, Context, LayerWorkbenchState, OptimizerTileWriteOp,
+    },
     BlendMode, Color, Fill, Func, LayerProps, Style,
 };
 
@@ -13,7 +15,7 @@
     workbench: &'w mut LayerWorkbenchState,
     state: &'w mut PassesSharedState,
     context: &'c Context<'_, P>,
-) -> ControlFlow<TileWriteOp> {
+) -> ControlFlow<OptimizerTileWriteOp> {
     #[derive(Debug)]
     enum InterestingCover {
         Opaque(Color),
@@ -65,7 +67,7 @@
         Some(InterestingCover::Opaque(color)) => {
             // All visible layers are unchanged so we can skip drawing altogether.
             if visible_layers_are_unchanged {
-                return ControlFlow::Break(TileWriteOp::None);
+                return ControlFlow::Break(OptimizerTileWriteOp::None);
             }
 
             (1, color)
@@ -87,7 +89,7 @@
     });
 
     match color {
-        Some(color) => ControlFlow::Break(TileWriteOp::Solid(color)),
+        Some(color) => ControlFlow::Break(OptimizerTileWriteOp::Solid(color)),
         None => ControlFlow::Continue(()),
     }
 }
diff --git a/src/graphics/lib/compute/surpass/src/painter/layer_workbench/passes/skip_trivial_clips.rs b/src/graphics/lib/compute/surpass/src/painter/layer_workbench/passes/skip_trivial_clips.rs
index ba1da2c..3ed8488 100644
--- a/src/graphics/lib/compute/surpass/src/painter/layer_workbench/passes/skip_trivial_clips.rs
+++ b/src/graphics/lib/compute/surpass/src/painter/layer_workbench/passes/skip_trivial_clips.rs
@@ -6,7 +6,7 @@
 
 use crate::painter::{
     layer_workbench::{
-        passes::PassesSharedState, Context, Index, LayerWorkbenchState, TileWriteOp,
+        passes::PassesSharedState, Context, Index, LayerWorkbenchState, OptimizerTileWriteOp,
     },
     Func, LayerProps, Style,
 };
@@ -15,7 +15,7 @@
     workbench: &'w mut LayerWorkbenchState,
     state: &'w mut PassesSharedState,
     context: &'c Context<'_, P>,
-) -> ControlFlow<TileWriteOp> {
+) -> ControlFlow<OptimizerTileWriteOp> {
     struct Clip {
         is_full: bool,
         last_layer_id: u32,
diff --git a/src/graphics/lib/compute/surpass/src/painter/layer_workbench/passes/tile_unchanged.rs b/src/graphics/lib/compute/surpass/src/painter/layer_workbench/passes/tile_unchanged.rs
index 64d6fc0..d911351 100644
--- a/src/graphics/lib/compute/surpass/src/painter/layer_workbench/passes/tile_unchanged.rs
+++ b/src/graphics/lib/compute/surpass/src/painter/layer_workbench/passes/tile_unchanged.rs
@@ -2,10 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-use std::{mem, ops::ControlFlow};
+use std::ops::ControlFlow;
 
 use crate::painter::{
-    layer_workbench::{passes::PassesSharedState, Context, LayerWorkbenchState, TileWriteOp},
+    layer_workbench::{
+        passes::PassesSharedState, Context, LayerWorkbenchState, OptimizerTileWriteOp,
+    },
     LayerProps,
 };
 
@@ -13,26 +15,26 @@
     workbench: &'w mut LayerWorkbenchState,
     state: &'w mut PassesSharedState,
     context: &'c Context<'_, P>,
-) -> ControlFlow<TileWriteOp> {
+) -> ControlFlow<OptimizerTileWriteOp> {
     let clear_color_is_unchanged = context
-        .previous_clear_color
+        .cached_clear_color
         .map(|previous_clear_color| previous_clear_color == context.clear_color)
         .unwrap_or_default();
 
-    let tile_paint = context.previous_layers.take().and_then(|previous_layers| {
+    let tile_paint = context.cached_tile.as_ref().and_then(|cached_tile| {
         let layers = workbench.ids.len() as u32;
+        let previous_layers = cached_tile.update_layer_count(Some(layers));
 
-        let is_unchanged = if let Some(previous_layers) = previous_layers {
-            let old_layers = mem::replace(previous_layers, layers);
-            state.layers_were_removed = layers < old_layers;
+        let is_unchanged = previous_layers
+            .map(|previous_layers| {
+                state.layers_were_removed = layers < previous_layers;
 
-            old_layers == layers && workbench.ids.iter().all(|&id| context.props.is_unchanged(id))
-        } else {
-            *previous_layers = Some(layers);
-            false
-        };
+                previous_layers == layers
+                    && workbench.ids.iter().all(|&id| context.props.is_unchanged(id))
+            })
+            .unwrap_or_default();
 
-        (clear_color_is_unchanged && is_unchanged).then(|| TileWriteOp::None)
+        (clear_color_is_unchanged && is_unchanged).then(|| OptimizerTileWriteOp::None)
     });
 
     match tile_paint {
diff --git a/src/graphics/lib/compute/surpass/src/painter/mod.rs b/src/graphics/lib/compute/surpass/src/painter/mod.rs
index e1ed084..6bb3fb3 100755
--- a/src/graphics/lib/compute/surpass/src/painter/mod.rs
+++ b/src/graphics/lib/compute/surpass/src/painter/mod.rs
@@ -6,7 +6,9 @@
     borrow::Cow,
     cell::{Cell, RefCell, RefMut},
     collections::BTreeMap,
+    convert::TryInto,
     mem,
+    ops::ControlFlow,
     ops::Range,
     slice::ChunksExactMut,
 };
@@ -15,7 +17,7 @@
 
 use crate::{
     layout::{Flusher, Layout, Slice, TileFill},
-    painter::layer_workbench::TileWriteOp,
+    painter::layer_workbench::{OptimizerTileWriteOp, TileWriteOp},
     rasterizer::{search_last_by_key, PixelSegment},
     simd::{f32x4, f32x8, i16x16, i32x8, i8x16, u32x4, u32x8, u8x32, Simd},
     PIXEL_DOUBLE_WIDTH, PIXEL_WIDTH, TILE_HEIGHT, TILE_WIDTH,
@@ -476,7 +478,7 @@
         channels: [Channel; 4],
         clear_color: Color,
         previous_clear_color: Option<Color>,
-        mut previous_layers: Option<&mut [Option<u32>]>,
+        cached_tiles: Option<&[CachedTile]>,
         row: ChunksExactMut<'_, Slice<'_, u8>>,
         crop: &Option<Rect>,
         flusher: Option<&dyn Flusher>,
@@ -521,10 +523,9 @@
                 tile_y,
                 segments: current_segments,
                 props,
-                previous_clear_color,
-                previous_layers: Cell::new(
-                    previous_layers.as_mut().map(|layers_per_tile| &mut layers_per_tile[tile_x]),
-                ),
+                cached_clear_color: previous_clear_color,
+                cached_tile: cached_tiles.map(|cached_tiles| &cached_tiles[tile_x]),
+                channels,
                 clear_color,
             };
 
@@ -532,10 +533,7 @@
 
             match workbench.drive_tile_painting(self, &context) {
                 TileWriteOp::None => (),
-                TileWriteOp::Solid(color) => {
-                    let color = channels.map(|c| color.channel(c));
-                    L::write(slices, flusher, TileFill::Solid(to_srgb_bytes(color)))
-                }
+                TileWriteOp::Solid(color) => L::write(slices, flusher, TileFill::Solid(color)),
                 TileWriteOp::ColorBuffer => {
                     self.compute_srgb(channels);
                     let colors: &[[u8; 4]] = unsafe {
@@ -566,7 +564,7 @@
     j: usize,
     row: ChunksExactMut<'_, Slice<'_, u8>>,
     previous_clear_color: Option<Color>,
-    layers_per_tile: Option<&mut [Option<u32>]>,
+    cached_tiles: Option<&[CachedTile]>,
     flusher: Option<&dyn Flusher>,
 ) {
     if let Some(rect) = crop {
@@ -600,7 +598,7 @@
             channels,
             clear_color,
             previous_clear_color,
-            layers_per_tile,
+            cached_tiles,
             row,
             crop,
             flusher,
@@ -608,6 +606,92 @@
     });
 }
 
+#[derive(Clone, Debug, Default)]
+pub struct CachedTile {
+    // Bitfield used to store the existence of `layer_count` and `solid_color` values
+    tags: Cell<u8>,
+    // Present when tag bit 0b10 is set.
+    layer_count: Cell<[u8; 3]>,
+    // Present when tag bit 0b01 is set.
+    solid_color: Cell<[u8; 4]>,
+}
+
+impl CachedTile {
+    pub fn layer_count(&self) -> Option<u32> {
+        let layer_count = self.layer_count.get();
+        let layer_count = u32::from_le_bytes([layer_count[0], layer_count[1], layer_count[2], 0]);
+
+        match self.tags.get() {
+            0b10 | 0b11 => Some(layer_count),
+            _ => None,
+        }
+    }
+
+    pub fn solid_color(&self) -> Option<[u8; 4]> {
+        match self.tags.get() {
+            0b01 | 0b11 => Some(self.solid_color.get()),
+            _ => None,
+        }
+    }
+
+    pub fn update_layer_count(&self, layer_count: Option<u32>) -> Option<u32> {
+        let previous_layer_count = self.layer_count();
+        match layer_count {
+            None => {
+                self.tags.set(self.tags.get() & 0b01);
+            }
+            Some(layer_count) => {
+                self.tags.set(self.tags.get() | 0b10);
+                self.layer_count.set(layer_count.to_le_bytes()[..3].try_into().unwrap());
+            }
+        };
+        previous_layer_count
+    }
+
+    pub fn update_solid_color(&self, solid_color: Option<[u8; 4]>) -> Option<[u8; 4]> {
+        let previous_solid_color = self.solid_color();
+        match solid_color {
+            None => {
+                self.tags.set(self.tags.get() & 0b10);
+            }
+            Some(color) => {
+                self.tags.set(self.tags.get() | 0b01);
+                self.solid_color.set(color);
+            }
+        };
+        previous_solid_color
+    }
+
+    pub fn convert_optimizer_op<'c, P: LayerProps>(
+        tile_op: ControlFlow<OptimizerTileWriteOp>,
+        context: &'c Context<'_, P>,
+    ) -> ControlFlow<TileWriteOp> {
+        match tile_op {
+            ControlFlow::Break(OptimizerTileWriteOp::Solid(color)) => {
+                let color = to_srgb_bytes(context.channels.map(|c| color.channel(c)));
+                let color_is_unchanged = context
+                    .cached_tile
+                    .as_ref()
+                    .map(|cached_tile| cached_tile.update_solid_color(Some(color)) == Some(color))
+                    .unwrap_or_default();
+
+                if color_is_unchanged {
+                    ControlFlow::Break(TileWriteOp::None)
+                } else {
+                    ControlFlow::Break(TileWriteOp::Solid(color))
+                }
+            }
+            ControlFlow::Break(OptimizerTileWriteOp::None) => ControlFlow::Break(TileWriteOp::None),
+            _ => {
+                context.cached_tile.as_ref().map(|cached_tile| {
+                    cached_tile.update_solid_color(None);
+                });
+                ControlFlow::Continue(())
+            }
+        }
+    }
+}
+
 #[allow(clippy::too_many_arguments)]
 #[inline]
 pub fn for_each_row<L: Layout, S: LayerProps>(
@@ -616,7 +700,7 @@
     channels: [Channel; 4],
     flusher: Option<&dyn Flusher>,
     previous_clear_color: Option<Color>,
-    layers_per_tile: Option<RefMut<'_, Vec<Option<u32>>>>,
+    cached_tiles: Option<RefMut<'_, Vec<CachedTile>>>,
     mut segments: &[PixelSegment<TILE_WIDTH, TILE_HEIGHT>],
     clear_color: Color,
     crop: &Option<Rect>,
@@ -631,12 +715,12 @@
     let row_of_tiles_len = width_in_tiles * layout.slices_per_tile();
     let mut slices = layout.slices(buffer);
 
-    if let Some(mut layers_per_tile) = layers_per_tile {
+    if let Some(mut cached_tiles) = cached_tiles {
         slices
             .par_chunks_mut(row_of_tiles_len)
-            .zip_eq(layers_per_tile.par_chunks_mut(width_in_tiles))
+            .zip_eq(cached_tiles.par_chunks_mut(width_in_tiles))
             .enumerate()
-            .for_each(|(j, (row_of_tiles, layers_per_tile))| {
+            .for_each(|(j, (row_of_tiles, cached_tiles))| {
                 print_row::<S, L>(
                     segments,
                     channels,
@@ -646,7 +730,7 @@
                     j,
                     row_of_tiles.chunks_exact_mut(row_of_tiles.len() / width_in_tiles),
                     previous_clear_color,
-                    Some(layers_per_tile),
+                    Some(cached_tiles),
                     flusher,
                 );
             });
@@ -806,8 +890,9 @@
             tile_y: 0,
             segments,
             props,
-            previous_clear_color: None,
-            previous_layers: Cell::new(None),
+            cached_clear_color: None,
+            cached_tile: None,
+            channels: RGBA,
             clear_color,
         };
 
@@ -1123,8 +1208,9 @@
             tile_y: 0,
             segments: &segments,
             props: &props,
-            previous_clear_color: None,
-            previous_layers: Cell::new(None),
+            cached_clear_color: None,
+            cached_tile: None,
+            channels: RGBA,
             clear_color: BLACK,
         };
 
@@ -1405,4 +1491,21 @@
 
         assert_eq!(buffer_layout.width_in_tiles() * buffer_layout.height_in_tiles(), 32);
     }
+
+    #[test]
+    fn cached_tiles() {
+        const RED: [u8; 4] = [255, 0, 0, 255];
+
+        let cached_tile = CachedTile::default();
+
+        // Solid color
+        assert_eq!(cached_tile.solid_color(), None);
+        assert_eq!(cached_tile.update_solid_color(Some(RED)), None);
+        assert_eq!(cached_tile.solid_color(), Some(RED));
+
+        // Layer count
+        assert_eq!(cached_tile.layer_count(), None);
+        assert_eq!(cached_tile.update_layer_count(Some(2)), None);
+        assert_eq!(cached_tile.layer_count(), Some(2));
+    }
 }
diff --git a/src/lib/assembly/structured_config/BUILD.gn b/src/lib/assembly/structured_config/BUILD.gn
index ed9c740..ac8645a 100644
--- a/src/lib/assembly/structured_config/BUILD.gn
+++ b/src/lib/assembly/structured_config/BUILD.gn
@@ -2,8 +2,6 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import("//build/components.gni")
-import("//build/host.gni")
 import("//build/rust/rustc_library.gni")
 
 if (is_host) {
@@ -24,85 +22,9 @@
       "//tools/lib/config_value_file",
     ]
   }
-}
-
-fuchsia_component("pass_with_config") {
-  manifest = "meta/test_with_config.cml"
-  restricted_features = [ "structured_config" ]
-}
-
-fuchsia_structured_config_values("pass_with_config_values") {
-  cm_label = ":pass_with_config"
-  values_source = "meta/test_with_config_values.json5"
-}
-
-fuchsia_component("fail_missing_config") {
-  manifest = "meta/test_with_config.cml"
-  restricted_features = [ "structured_config" ]
-}
-
-fuchsia_component("pass_without_config") {
-  manifest = "meta/test_without_config.cml"
-}
-
-# this package should only be depended upon by the test below, including it in a system image
-# should cause product validation to fail
-fuchsia_package("validate_structured_config_packaging") {
-  deps = [
-    ":fail_missing_config",
-    ":pass_with_config",
-    ":pass_with_config_values",
-    ":pass_without_config",
-  ]
-
-  # This package intentionally includes invalidly configured components for testing purposes.
-  validate_structured_config = false
-}
-
-if (is_host) {
-  package_out_dir = get_label_info(
-          ":validate_structured_config_packaging($default_toolchain)",
-          "target_out_dir")
-  test_package_manifest_path = "$package_out_dir/validate_structured_config_packaging/package_manifest.json"
-  test_meta_far_path =
-      "$package_out_dir/validate_structured_config_packaging/meta.far"
-
-  rustc_test("structured_config_test") {
-    disable_clippy = true  # TODO(fxbug.dev/86506): clippy needs env vars
-    sources = [ "src/test.rs" ]
-    source_root = "src/test.rs"
-    deps = [
-      ":structured_config",
-      "//src/lib/assembly/validate_product",
-      "//src/sys/pkg/lib/far/rust:fuchsia-archive",
-      "//src/sys/pkg/lib/fuchsia-pkg",
-      "//third_party/rust_crates:maplit",
-      "//third_party/rust_crates:serde_json",
-      "//third_party/rust_crates:tempfile",
-    ]
-    non_rust_deps = [ ":test_asset_data" ]
-    configs += [ ":test_asset_paths" ]
-  }
-
-  host_test_data("test_asset_data") {
-    sources = [
-      test_meta_far_path,
-      test_package_manifest_path,
-    ]
-    deps = [ ":validate_structured_config_packaging($default_toolchain)" ]
-  }
-
-  config("test_asset_paths") {
-    # host tests are run with cwd=out/default
-    rustenv = [
-      "TEST_META_FAR=" + rebase_path(test_meta_far_path, root_build_dir),
-      "TEST_MANIFEST_PATH=" +
-          rebase_path(test_package_manifest_path, root_build_dir),
-    ]
-  }
 
   group("host_tests") {
     testonly = true
-    deps = [ ":structured_config_test" ]
+    deps = [ "tests:host_tests" ]
   }
 }
diff --git a/src/lib/assembly/structured_config/src/lib.rs b/src/lib/assembly/structured_config/src/lib.rs
index 77d1631..ae6e8ba 100644
--- a/src/lib/assembly/structured_config/src/lib.rs
+++ b/src/lib/assembly/structured_config/src/lib.rs
@@ -102,10 +102,15 @@
     BuildPackage(#[source] anyhow::Error),
 }
 
+/// The list of runners currently supported by structured config.
+static SUPPORTED_RUNNERS: &[&str] = &["driver", "elf"];
+
 /// Validate a component manifest given access to the contents of its `/pkg` directory.
 ///
 /// Ensures that if a component has a `config` stanza it can be paired with the specified
 /// value file and that together they can produce a valid configuration for the component.
+///
+/// Also ensures that the component is using a runner which supports structured config.
 pub fn validate_component<Ns: PkgNamespace>(
     manifest_path: &str,
     reader: &mut Ns,
@@ -119,7 +124,20 @@
 
     // check for config
     if let Some(config_decl) = manifest.config {
-        // it's required, so find out where it's stored
+        // make sure the component has a runner that will deliver config before finding values
+        let runner = manifest
+            .program
+            .as_ref()
+            .ok_or(ValidationError::ProgramMissing)?
+            .runner
+            .as_ref()
+            .ok_or(ValidationError::RunnerMissing)?
+            .str();
+        if !SUPPORTED_RUNNERS.contains(&runner) {
+            return Err(ValidationError::UnsupportedRunner(runner.to_owned()));
+        }
+
+        // config is required, so find out where it's stored
         let cm_rust::ConfigValueSource::PackagePath(path) = &config_decl.value_source;
         let config_bytes = reader.read_file(&path).map_err(|source| {
             ValidationError::ConfigValuesMissing { path: path.to_owned(), source }
@@ -153,6 +171,12 @@
     ParseConfig(#[source] PersistentFidlError),
     #[error("Couldn't resolve config.")]
     ResolveConfig(#[source] config_encoder::ResolutionError),
+    #[error("Component manifest does not specify `program`.")]
+    ProgramMissing,
+    #[error("Component manifest does not specify `program.runner`.")]
+    RunnerMissing,
+    #[error("{:?} is not a supported runner. (allowed: {:?})", _0, SUPPORTED_RUNNERS)]
+    UnsupportedRunner(String),
 }
 
 /// Parse bytes as a FIDL type, passing it to a `cm_fidl_validator` function before converting
diff --git a/src/lib/assembly/structured_config/tests/BUILD.gn b/src/lib/assembly/structured_config/tests/BUILD.gn
new file mode 100644
index 0000000..6a03b51
--- /dev/null
+++ b/src/lib/assembly/structured_config/tests/BUILD.gn
@@ -0,0 +1,13 @@
+# Copyright 2022 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+if (is_host) {
+  group("host_tests") {
+    testonly = true
+    deps = [
+      "malformed_fail:host_tests",
+      "repackaging_pass:host_tests",
+    ]
+  }
+}
diff --git a/src/lib/assembly/structured_config/tests/malformed_fail/BUILD.gn b/src/lib/assembly/structured_config/tests/malformed_fail/BUILD.gn
new file mode 100644
index 0000000..8b44c7a
--- /dev/null
+++ b/src/lib/assembly/structured_config/tests/malformed_fail/BUILD.gn
@@ -0,0 +1,77 @@
+# Copyright 2022 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/components.gni")
+import("//build/host.gni")
+import("//build/rust/rustc_test.gni")
+
+fuchsia_component("fail_missing_program") {
+  manifest = "meta/test_without_program.cml"
+  restricted_features = [ "structured_config" ]
+  visibility = [ ":*" ]
+}
+
+fuchsia_component("fail_bad_runner") {
+  manifest = "meta/test_with_fake_runner.cml"
+  restricted_features = [ "structured_config" ]
+  visibility = [ ":*" ]
+}
+
+# This package should only be depended upon by the test below; including it in a system
+# image should cause product validation to fail.
+fuchsia_package("malformed_for_validation_fail") {
+  deps = [
+    ":fail_bad_runner",
+    ":fail_missing_program",
+  ]
+
+  # This package intentionally includes invalidly configured components for testing purposes.
+  validate_structured_config = false
+  visibility = [ ":*" ]
+}
+
+if (is_host) {
+  package_out_dir =
+      get_label_info(":malformed_for_validation_fail($default_toolchain)",
+                     "target_out_dir")
+  test_meta_far_path = "$package_out_dir/malformed_for_validation_fail/meta.far"
+
+  host_test_data("test_asset_data") {
+    sources = [ test_meta_far_path ]
+    deps = [ ":malformed_for_validation_fail($default_toolchain)" ]
+    visibility = [ ":*" ]
+  }
+
+  config("test_asset_paths") {
+    # host tests are run with cwd=out/default
+    rustenv =
+        [ "TEST_META_FAR=" + rebase_path(test_meta_far_path, root_build_dir) ]
+    visibility = [ ":*" ]
+  }
+
+  rustc_test("structured_config_malformed_test") {
+    disable_clippy = true  # TODO(fxbug.dev/86506): clippy needs env vars
+    sources = [ "malformed.rs" ]
+    source_root = "malformed.rs"
+    deps = [
+      "//src/lib/assembly/structured_config",
+      "//src/sys/pkg/lib/far/rust:fuchsia-archive",
+      "//third_party/rust_crates:serde_json",
+    ]
+    non_rust_deps = [ ":test_asset_data" ]
+    configs += [ ":test_asset_paths" ]
+    visibility = [ ":*" ]
+  }
+
+  group("host_tests") {
+    testonly = true
+    deps = [ ":structured_config_malformed_test" ]
+
+    # Prevent this group from leaking packages into system images if incorrectly dep'd from target.
+    metadata = {
+      distribution_entries_barrier = []
+      package_barrier = []
+    }
+  }
+}
diff --git a/src/lib/assembly/structured_config/tests/malformed_fail/malformed.rs b/src/lib/assembly/structured_config/tests/malformed_fail/malformed.rs
new file mode 100644
index 0000000..8e490c3
--- /dev/null
+++ b/src/lib/assembly/structured_config/tests/malformed_fail/malformed.rs
@@ -0,0 +1,30 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use assembly_structured_config::{validate_component, ValidationError};
+use fuchsia_archive::Reader;
+use std::io::Cursor;
+
+const FAIL_MISSING_PROGRAM: &str = "meta/fail_missing_program.cm";
+const FAIL_BAD_RUNNER: &str = "meta/fail_bad_runner.cm";
+
+fn malformed_test_meta_far() -> Reader<Cursor<Vec<u8>>> {
+    Reader::new(Cursor::new(std::fs::read(env!("TEST_META_FAR")).unwrap())).unwrap()
+}
+
+#[test]
+fn config_requires_program() {
+    match validate_component(FAIL_MISSING_PROGRAM, &mut malformed_test_meta_far()).unwrap_err() {
+        ValidationError::ProgramMissing => (),
+        other => panic!("expected missing program, got {}", other),
+    }
+}
+
+#[test]
+fn config_requires_known_good_runner() {
+    match validate_component(FAIL_BAD_RUNNER, &mut malformed_test_meta_far()).unwrap_err() {
+        ValidationError::UnsupportedRunner(runner) => assert_eq!(runner, "fake_runner"),
+        other => panic!("expected unsupported runner, got {}", other),
+    }
+}
diff --git a/src/lib/assembly/structured_config/tests/malformed_fail/meta/test_with_fake_runner.cml b/src/lib/assembly/structured_config/tests/malformed_fail/meta/test_with_fake_runner.cml
new file mode 100644
index 0000000..faa843b
--- /dev/null
+++ b/src/lib/assembly/structured_config/tests/malformed_fail/meta/test_with_fake_runner.cml
@@ -0,0 +1,8 @@
+{
+    program: {
+        runner: "fake_runner",
+    },
+    config: {
+        foo: { type: "bool" },
+    },
+}
diff --git a/src/lib/assembly/structured_config/meta/test_with_config.cml b/src/lib/assembly/structured_config/tests/malformed_fail/meta/test_without_program.cml
similarity index 100%
rename from src/lib/assembly/structured_config/meta/test_with_config.cml
rename to src/lib/assembly/structured_config/tests/malformed_fail/meta/test_without_program.cml
diff --git a/src/lib/assembly/structured_config/tests/repackaging_pass/BUILD.gn b/src/lib/assembly/structured_config/tests/repackaging_pass/BUILD.gn
new file mode 100644
index 0000000..8aaf59e
--- /dev/null
+++ b/src/lib/assembly/structured_config/tests/repackaging_pass/BUILD.gn
@@ -0,0 +1,110 @@
+# Copyright 2022 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/components.gni")
+import("//build/host.gni")
+import("//build/rust/rustc_binary.gni")
+
+rustc_binary("empty_bin") {
+  source_root = "empty_bin.rs"
+  sources = [ "empty_bin.rs" ]
+  visibility = [ ":*" ]
+}
+
+fuchsia_component("pass_with_config") {
+  manifest = "meta/test_with_config.cml"
+  restricted_features = [ "structured_config" ]
+  deps = [ ":empty_bin" ]
+  visibility = [ ":*" ]
+}
+
+fuchsia_structured_config_values("pass_with_config_values") {
+  cm_label = ":pass_with_config"
+  values_source = "meta/test_with_config_values.json5"
+  visibility = [ ":*" ]
+}
+
+fuchsia_component("fail_missing_config") {
+  manifest = "meta/test_with_config.cml"
+  restricted_features = [ "structured_config" ]
+  deps = [ ":empty_bin" ]
+  visibility = [ ":*" ]
+}
+
+fuchsia_component("pass_without_config") {
+  manifest = "meta/test_without_config.cml"
+  visibility = [ ":*" ]
+}
+
+# This package should only be depended upon by the test below; including it in a system
+# image should cause product validation to fail.
+_package_name = "validate_structured_config_repackaging"
+fuchsia_package(_package_name) {
+  testonly = true
+  deps = [
+    ":fail_missing_config",
+    ":pass_with_config",
+    ":pass_with_config_values",
+    ":pass_without_config",
+  ]
+  visibility = [ ":*" ]
+
+  # This package intentionally includes invalidly configured components for testing purposes.
+  validate_structured_config = false
+}
+
+_archive_name = "${_package_name}_archive"
+fuchsia_package_archive(_archive_name) {
+  testonly = true
+  package = ":$_package_name"
+  visibility = [ ":*" ]
+}
+
+if (is_host) {
+  _archive_out_dir =
+      get_label_info(":$_archive_name($default_toolchain)", "target_out_dir")
+  _test_package_archive_path = "$_archive_out_dir/$_package_name.far"
+
+  host_test_data("test_asset_data") {
+    sources = [ _test_package_archive_path ]
+    deps = [ ":$_archive_name($default_toolchain)" ]
+    visibility = [ ":*" ]
+  }
+
+  config("test_asset_paths") {
+    # host tests are run with cwd=out/default
+    rustenv = [ "TEST_PACKAGE_FAR=" +
+                rebase_path(_test_package_archive_path, root_build_dir) ]
+    visibility = [ ":*" ]
+  }
+
+  rustc_test("structured_config_repackaging_test") {
+    disable_clippy = true  # TODO(fxbug.dev/86506): clippy needs env vars
+    sources = [ "repackaging.rs" ]
+    source_root = "repackaging.rs"
+    deps = [
+      "//src/lib/assembly/structured_config",
+      "//src/lib/assembly/validate_product",
+      "//src/sys/pkg/lib/far/rust:fuchsia-archive",
+      "//src/sys/pkg/lib/fuchsia-pkg",
+      "//third_party/rust_crates:maplit",
+      "//third_party/rust_crates:serde_json",
+      "//third_party/rust_crates:tempfile",
+    ]
+    non_rust_deps = [ ":test_asset_data" ]
+    configs += [ ":test_asset_paths" ]
+    visibility = [ ":*" ]
+  }
+
+  group("host_tests") {
+    testonly = true
+    deps = [ ":structured_config_repackaging_test" ]
+
+    # Prevent this group from leaking packages into system images if incorrectly dep'd from target.
+    metadata = {
+      distribution_entries_barrier = []
+      package_barrier = []
+    }
+  }
+}
diff --git a/src/lib/assembly/structured_config/tests/repackaging_pass/empty_bin.rs b/src/lib/assembly/structured_config/tests/repackaging_pass/empty_bin.rs
new file mode 100644
index 0000000..f080382
--- /dev/null
+++ b/src/lib/assembly/structured_config/tests/repackaging_pass/empty_bin.rs
@@ -0,0 +1,5 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+fn main() {}
diff --git a/src/lib/assembly/structured_config/tests/repackaging_pass/meta/test_with_config.cml b/src/lib/assembly/structured_config/tests/repackaging_pass/meta/test_with_config.cml
new file mode 100644
index 0000000..d6e0662
--- /dev/null
+++ b/src/lib/assembly/structured_config/tests/repackaging_pass/meta/test_with_config.cml
@@ -0,0 +1,11 @@
+{
+    // TODO(https://fxbug.dev/97805) remove this shard once the empty binary doesn't need it
+    include: [ "syslog/client.shard.cml" ],
+    program: {
+        runner: "elf",
+        binary: "bin/empty_bin",
+    },
+    config: {
+        foo: { type: "bool" },
+    },
+}
diff --git a/src/lib/assembly/structured_config/meta/test_with_config_values.json5 b/src/lib/assembly/structured_config/tests/repackaging_pass/meta/test_with_config_values.json5
similarity index 100%
rename from src/lib/assembly/structured_config/meta/test_with_config_values.json5
rename to src/lib/assembly/structured_config/tests/repackaging_pass/meta/test_with_config_values.json5
diff --git a/src/lib/assembly/structured_config/meta/test_without_config.cml b/src/lib/assembly/structured_config/tests/repackaging_pass/meta/test_without_config.cml
similarity index 100%
rename from src/lib/assembly/structured_config/meta/test_without_config.cml
rename to src/lib/assembly/structured_config/tests/repackaging_pass/meta/test_without_config.cml
diff --git a/src/lib/assembly/structured_config/src/test.rs b/src/lib/assembly/structured_config/tests/repackaging_pass/repackaging.rs
similarity index 61%
rename from src/lib/assembly/structured_config/src/test.rs
rename to src/lib/assembly/structured_config/tests/repackaging_pass/repackaging.rs
index 9e3e87a..f6e7276 100644
--- a/src/lib/assembly/structured_config/src/test.rs
+++ b/src/lib/assembly/structured_config/tests/repackaging_pass/repackaging.rs
@@ -5,7 +5,7 @@
 use assembly_structured_config::{validate_component, Repackager, ValidationError};
 use assembly_validate_product::{validate_package, PackageValidationError};
 use fuchsia_archive::Reader;
-use fuchsia_pkg::{BlobInfo, PackageManifest};
+use fuchsia_pkg::{BlobInfo, PackageBuilder, PackageManifest};
 use maplit::btreemap;
 use std::io::Cursor;
 use tempfile::TempDir;
@@ -14,24 +14,39 @@
 const PASS_WITHOUT_CONFIG: &str = "meta/pass_without_config.cm";
 const FAIL_MISSING_CONFIG: &str = "meta/fail_missing_config.cm";
 
-const TEST_MANIFEST_PATH: &str = env!("TEST_MANIFEST_PATH");
+fn test_package_manifest() -> (PackageManifest, TempDir) {
+    // read the archive file
+    let archive =
+        Reader::new(Cursor::new(std::fs::read(env!("TEST_PACKAGE_FAR")).unwrap())).unwrap();
 
-fn test_package_manifest() -> PackageManifest {
-    PackageManifest::try_load_from(TEST_MANIFEST_PATH).unwrap()
+    // unpack the archive and create a manifest
+    let outdir = TempDir::new().unwrap();
+    let mut builder = PackageBuilder::from_archive(archive, outdir.path()).unwrap();
+    let manifest_path = outdir.path().join("package_manifest.json");
+    builder.manifest_path(&manifest_path);
+    builder.build(&outdir, outdir.path().join("meta.far")).unwrap();
+
+    // load the manifest back into memory for testing
+    let manifest = PackageManifest::try_load_from(&manifest_path).unwrap();
+    (manifest, outdir)
 }
 
-fn test_meta_far() -> Reader<Cursor<Vec<u8>>> {
-    Reader::new(Cursor::new(std::fs::read(env!("TEST_META_FAR")).unwrap())).unwrap()
+fn test_meta_far() -> (Reader<Cursor<Vec<u8>>>, TempDir) {
+    let (package_manifest, unpacked) = test_package_manifest();
+    let meta_far_source =
+        package_manifest.blobs().iter().find(|b| b.path == "meta/").unwrap().source_path.clone();
+    let reader = Reader::new(Cursor::new(std::fs::read(meta_far_source).unwrap())).unwrap();
+    (reader, unpacked)
 }
 
 /// Makes sure that we can "turn a package valid" if it's been produced by the build without value
 /// files.
 #[test]
 fn adding_config_makes_invalid_package_valid() {
-    let original_manifest = test_package_manifest();
+    let (original_manifest, unpacked_original) = test_package_manifest();
 
     // ensure that product validation will fail without us doing anything
-    match validate_package(TEST_MANIFEST_PATH) {
+    match validate_package(unpacked_original.path().join("package_manifest.json")) {
         Err(PackageValidationError::InvalidComponents(..)) => (),
         other => panic!("expected validation to fail with invalid components, got {:#?}", other),
     }
@@ -51,7 +66,7 @@
 /// Makes sure that the product assembly tooling never silently squashes an existing value file.
 #[test]
 fn cant_add_config_on_top_of_existing_values() {
-    let original_manifest = test_package_manifest();
+    let (original_manifest, _unpacked_original) = test_package_manifest();
     let temp = TempDir::new().unwrap();
     let mut repackager = Repackager::new(original_manifest.clone(), temp.path()).unwrap();
     repackager
@@ -62,7 +77,7 @@
 /// Checks against unintended side effects from repackaging.
 #[test]
 fn repackaging_with_no_config_produces_identical_manifest() {
-    let original_manifest = test_package_manifest();
+    let (original_manifest, _unpacked_original) = test_package_manifest();
 
     let temp = TempDir::new().unwrap();
     let repackager = Repackager::new(original_manifest.clone(), temp.path()).unwrap();
@@ -77,11 +92,15 @@
         "repackaging without config must not change # of blobs"
     );
 
+    // repackaging might change order of blobs in the manifests, sort for consistency
+    let mut original_blobs: Vec<_> = original_manifest.blobs().iter().collect();
+    let mut new_blobs: Vec<_> = new_manifest.blobs().iter().collect();
+    original_blobs.sort_by_key(|b| &b.path);
+    new_blobs.sort_by_key(|b| &b.path);
+
     // test blobs for equality
     // (ignoring source paths because we wrote the new blob contents into a temporary directory)
-    for (original_blob, new_blob) in
-        original_manifest.blobs().iter().zip(new_manifest.blobs().iter())
-    {
+    for (original_blob, new_blob) in original_blobs.iter().zip(new_blobs.iter()) {
         let BlobInfo {
             source_path: _,
             path: original_path,
@@ -98,17 +117,20 @@
 
 #[test]
 fn config_resolves() {
-    validate_component(PASS_WITH_CONFIG, &mut test_meta_far()).unwrap();
+    let (mut meta_far, _unpacked_package) = test_meta_far();
+    validate_component(PASS_WITH_CONFIG, &mut meta_far).unwrap();
 }
 
 #[test]
 fn no_config_passes() {
-    validate_component(PASS_WITHOUT_CONFIG, &mut test_meta_far()).unwrap();
+    let (mut meta_far, _unpacked_package) = test_meta_far();
+    validate_component(PASS_WITHOUT_CONFIG, &mut meta_far).unwrap();
 }
 
 #[test]
 fn config_requires_values() {
-    match validate_component(FAIL_MISSING_CONFIG, &mut test_meta_far()).unwrap_err() {
+    let (mut meta_far, _unpacked_package) = test_meta_far();
+    match validate_component(FAIL_MISSING_CONFIG, &mut meta_far).unwrap_err() {
         ValidationError::ConfigValuesMissing { .. } => (),
         other => panic!("expected missing values, got {}", other),
     }
diff --git a/src/lib/component_hub/tests/BUILD.gn b/src/lib/component_hub/tests/BUILD.gn
index 5f54de1..13e921f 100644
--- a/src/lib/component_hub/tests/BUILD.gn
+++ b/src/lib/component_hub/tests/BUILD.gn
@@ -22,27 +22,33 @@
   sources = [ "src/lib.rs" ]
 }
 
-fuchsia_component_manifest("test_manifest") {
-  testonly = true
-  component_name = "test"
-  manifest = "meta/test.cml"
-  restricted_features = [ "structured_config" ]
-}
-
 fuchsia_component("test") {
   testonly = true
-  cm_label = ":test_manifest"
+  manifest = "meta/test.cml"
   deps = [ ":integration_test_bin" ]
 }
 
+rustc_binary("foo_noop") {
+  sources = [ "src/foo.rs" ]
+  source_root = "src/foo.rs"
+}
+
+fuchsia_component_manifest("foo_manifest") {
+  testonly = true
+  component_name = "foo"
+  manifest = "meta/foo.cml"
+  restricted_features = [ "structured_config" ]
+}
+
 fuchsia_component("foo") {
   testonly = true
-  manifest = "meta/foo.cml"
+  cm_label = ":foo_manifest"
+  deps = [ ":foo_noop" ]
 }
 
-fuchsia_structured_config_values("test_config_values") {
+fuchsia_structured_config_values("foo_config_values") {
   testonly = true
-  cm_label = ":test_manifest"
+  cm_label = ":foo_manifest"
   values_source = "config/config_values.json5"
 }
 
@@ -50,7 +56,7 @@
   test_components = [ ":test" ]
   deps = [
     ":foo",
-    ":test_config_values",
+    ":foo_config_values",
     "//src/sys/component_manager/testing/echo_server",
   ]
 }
diff --git a/src/lib/component_hub/tests/meta/foo.cml b/src/lib/component_hub/tests/meta/foo.cml
index 0967ef4..cb2d3f5 100644
--- a/src/lib/component_hub/tests/meta/foo.cml
+++ b/src/lib/component_hub/tests/meta/foo.cml
@@ -1 +1,15 @@
-{}
+{
+    // TODO(https://fxbug.dev/97805) remove include once no longer required
+    include: [ "syslog/client.shard.cml" ],
+    program: {
+        runner: "elf",
+        binary: "bin/foo_noop",
+    },
+    config: {
+        my_uint8: { type: "uint8" },
+        my_string: {
+            type: "string",
+            max_size: 100,
+        },
+    },
+}
diff --git a/src/lib/component_hub/tests/meta/test.cml b/src/lib/component_hub/tests/meta/test.cml
index 01d3b09..dc7e0ec 100644
--- a/src/lib/component_hub/tests/meta/test.cml
+++ b/src/lib/component_hub/tests/meta/test.cml
@@ -10,6 +10,7 @@
         {
             name: "foo",
             url: "#meta/foo.cm",
+            startup: "eager",
         },
         {
             name: "echo_server",
@@ -52,11 +53,4 @@
             from: "self",
         },
     ],
-    config: {
-        my_uint8: { type: "uint8" },
-        my_string: {
-            type: "string",
-            max_size: 100,
-        },
-    },
 }
diff --git a/src/lib/component_hub/tests/src/foo.rs b/src/lib/component_hub/tests/src/foo.rs
new file mode 100644
index 0000000..f080382
--- /dev/null
+++ b/src/lib/component_hub/tests/src/foo.rs
@@ -0,0 +1,5 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+fn main() {}
diff --git a/src/lib/component_hub/tests/src/lib.rs b/src/lib/component_hub/tests/src/lib.rs
index 6046f2b..8e427d5 100644
--- a/src/lib/component_hub/tests/src/lib.rs
+++ b/src/lib/component_hub/tests/src/lib.rs
@@ -88,14 +88,6 @@
         &resolved.incoming_capabilities
     );
 
-    assert_eq!(resolved.config.len(), 2);
-    let field1 = &resolved.config[0];
-    let field2 = &resolved.config[1];
-    assert_eq!(field1.key, "my_string");
-    assert_eq!(field1.value, "\"hello, world!\"");
-    assert_eq!(field2.key, "my_uint8");
-    assert_eq!(field2.value, "255");
-
     // We do not verify the contents of the execution, because they are largely dependent on
     // the Rust Test Runner
     assert!(component.execution.is_some());
@@ -107,8 +99,18 @@
     assert_eq!(component.moniker, AbsoluteMoniker::parse_str("/foo").unwrap());
     assert_eq!(component.url, "#meta/foo.cm");
     assert_eq!(component.component_type, "CML static component");
-    assert!(component.resolved.is_none());
     assert!(component.execution.is_none());
+
+    // check foo's config
+    let resolved_foo =
+        component.resolved.as_ref().expect("foo is eager, should have been resolved");
+    assert_eq!(resolved_foo.config.len(), 2);
+    let field1 = &resolved_foo.config[0];
+    let field2 = &resolved_foo.config[1];
+    assert_eq!(field1.key, "my_string");
+    assert_eq!(field1.value, "\"hello, world!\"");
+    assert_eq!(field2.key, "my_uint8");
+    assert_eq!(field2.value, "255");
 }
 
 #[fuchsia_async::run_singlethreaded(test)]
diff --git a/src/lib/ddktl/BUILD.gn b/src/lib/ddktl/BUILD.gn
index adfd779..ab9433a 100644
--- a/src/lib/ddktl/BUILD.gn
+++ b/src/lib/ddktl/BUILD.gn
@@ -16,7 +16,7 @@
 
 sdk_source_set("ddktl") {
   category = "experimental"  # due to //zircon/system/ulib/fidl:fidl-llcpp
-  sdk_name = "ddktl-experimental-driver-only"
+  sdk_name = "ddktl"
   sources = [
     "include/ddktl/device-internal.h",
     "include/ddktl/device.h",
diff --git a/src/lib/ddktl/ddktl-experimental-driver-only.api b/src/lib/ddktl/ddktl-experimental-driver-only.api
deleted file mode 100644
index 54188ea..0000000
--- a/src/lib/ddktl/ddktl-experimental-driver-only.api
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "pkg/ddktl-experimental-driver-only/include/ddktl/device-internal.h": "ba5123ae2f01d0798ca9a78dff0ef881",
-  "pkg/ddktl-experimental-driver-only/include/ddktl/device.h": "7d239e0cbcf908e86d22ed617d0557ca",
-  "pkg/ddktl-experimental-driver-only/include/ddktl/fidl.h": "1ccd97dc2f51224c5b314c9b81b30ca6",
-  "pkg/ddktl-experimental-driver-only/include/ddktl/init-txn.h": "235cec9d0106288ac2b15e09c93af2bd",
-  "pkg/ddktl-experimental-driver-only/include/ddktl/metadata.h": "3edf305903afe4a95d025084e9f4ee81",
-  "pkg/ddktl-experimental-driver-only/include/ddktl/resume-txn.h": "5740648294b4d982e58324b455161de8",
-  "pkg/ddktl-experimental-driver-only/include/ddktl/suspend-txn.h": "df0ca479cc01d1228aa12c5210e819b2",
-  "pkg/ddktl-experimental-driver-only/include/ddktl/unbind-txn.h": "46723559da40bc7014997d80349c222c"
-}
\ No newline at end of file
diff --git a/src/lib/fasync/BUILD.gn b/src/lib/fasync/BUILD.gn
index 0764d7d..3275cc4 100644
--- a/src/lib/fasync/BUILD.gn
+++ b/src/lib/fasync/BUILD.gn
@@ -32,7 +32,7 @@
   sdk = "source"
 
   sdk_publishable = false
-  sdk_name = "fasync-experimental-driver-only"
+  sdk_name = "fasync"
   sdk_headers = [
     "lib/fasync/internal/bridge.h",
     "lib/fasync/internal/compiler.h",
@@ -66,7 +66,7 @@
   sdk = "source"
 
   sdk_publishable = false
-  sdk_name = "fasync-fexecutor-experimental-driver-only"
+  sdk_name = "fasync-fexecutor"
   sdk_headers = [ "lib/fasync/fexecutor.h" ]
 
   sources = [ "fexecutor.cc" ]
diff --git a/src/media/audio/audio_core/mixer/mixer.h b/src/media/audio/audio_core/mixer/mixer.h
index b2c3eea..8f9c157 100644
--- a/src/media/audio/audio_core/mixer/mixer.h
+++ b/src/media/audio/audio_core/mixer/mixer.h
@@ -18,6 +18,7 @@
 #include "src/media/audio/audio_core/mixer/constants.h"
 #include "src/media/audio/audio_core/mixer/gain.h"
 #include "src/media/audio/lib/format/constants.h"
+#include "src/media/audio/lib/format2/channel_mapper.h"
 #include "src/media/audio/lib/timeline/timeline_function.h"
 
 namespace media::audio {
@@ -583,6 +584,20 @@
   virtual void EagerlyPrepare() {}
 
  protected:
+  // Template to read normalized source samples, and combine channels if required.
+  template <typename SourceSampleType, size_t SourceChanCount, size_t DestChanCount,
+            typename Enable = void>
+  class SourceReader {
+   public:
+    static inline float Read(const SourceSampleType* source_ptr, size_t dest_chan) {
+      return mapper_.Map(source_ptr, dest_chan);
+    }
+
+   private:
+    static inline media_audio::ChannelMapper<SourceSampleType, SourceChanCount, DestChanCount>
+        mapper_;
+  };
+
   Mixer(Fixed pos_filter_width, Fixed neg_filter_width, Gain::Limits gain_limits);
 
  private:
diff --git a/src/media/audio/audio_core/mixer/mixer_utils.h b/src/media/audio/audio_core/mixer/mixer_utils.h
index 3845284..e23d322 100644
--- a/src/media/audio/audio_core/mixer/mixer_utils.h
+++ b/src/media/audio/audio_core/mixer/mixer_utils.h
@@ -5,19 +5,12 @@
 #ifndef SRC_MEDIA_AUDIO_AUDIO_CORE_MIXER_MIXER_UTILS_H_
 #define SRC_MEDIA_AUDIO_AUDIO_CORE_MIXER_MIXER_UTILS_H_
 
-#include <cmath>
 #include <type_traits>
 
 #include "src/media/audio/audio_core/mixer/constants.h"
 #include "src/media/audio/audio_core/mixer/gain.h"
-#include "src/media/audio/lib/format2/sample_converter.h"
 
-namespace media::audio {
-
-// TODO(fxbug.dev/85201): Remove this workaround, once the device properly maps channels.
-constexpr bool kResampler4ChannelWorkaround = true;
-
-namespace mixer {
+namespace media::audio::mixer {
 
 // mixer_utils.h is a collection of inline templated utility functions meant to
 // be used by mixer implementations and expanded/optimized at compile time in
@@ -62,162 +55,6 @@
 };
 
 //
-// SourceReader
-//
-// Template to read normalized source samples, and combine channels if required.
-template <typename SourceSampleType, size_t SourceChanCount, size_t DestChanCount,
-          typename Enable = void>
-class SourceReader;
-
-// N:N mapper
-template <typename SourceSampleType, size_t SourceChanCount, size_t DestChanCount>
-class SourceReader<SourceSampleType, SourceChanCount, DestChanCount,
-                   typename std::enable_if_t<(SourceChanCount == DestChanCount)>> {
- public:
-  static inline float Read(const SourceSampleType* source_ptr, size_t dest_chan) {
-    return media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + dest_chan));
-  }
-};
-
-// 1:N mapper
-template <typename SourceSampleType, size_t SourceChanCount, size_t DestChanCount>
-class SourceReader<
-    SourceSampleType, SourceChanCount, DestChanCount,
-    typename std::enable_if_t<((SourceChanCount == 1) && (SourceChanCount != DestChanCount))>> {
- public:
-  static inline float Read(const SourceSampleType* source_ptr, size_t dest_chan) {
-    return media_audio::SampleConverter<SourceSampleType>::ToFloat(*source_ptr);
-  }
-};
-
-// Mappers for 2-channel sources
-//
-// 2->1 mapper
-template <typename SourceSampleType, size_t SourceChanCount, size_t DestChanCount>
-class SourceReader<SourceSampleType, SourceChanCount, DestChanCount,
-                   typename std::enable_if_t<(SourceChanCount == 2) && (DestChanCount == 1)>> {
- public:
-  // This simple 2:1 channel mapping assumes a "LR" stereo configuration for the source channels.
-  // Each dest frame's single value is essentially the average of the 2 source chans.
-  static inline float Read(const SourceSampleType* source_ptr, size_t dest_chan) {
-    return 0.5f * (media_audio::SampleConverter<SourceSampleType>::ToFloat(*source_ptr) +
-                   media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + 1)));
-  }
-};
-
-// 2->3 mapper
-template <typename SourceSampleType, size_t SourceChanCount, size_t DestChanCount>
-class SourceReader<SourceSampleType, SourceChanCount, DestChanCount,
-                   typename std::enable_if_t<((SourceChanCount == 2) && (DestChanCount == 3))>> {
- public:
-  static inline float Read(const SourceSampleType* source_ptr, size_t dest_chan) {
-    return (dest_chan < 2
-                ? media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + dest_chan))
-                : 0.5f *
-                      (media_audio::SampleConverter<SourceSampleType>::ToFloat(*source_ptr) +
-                       media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + 1))));
-  }
-};
-
-// 2->4 mapper
-template <typename SourceSampleType, size_t SourceChanCount, size_t DestChanCount>
-class SourceReader<SourceSampleType, SourceChanCount, DestChanCount,
-                   typename std::enable_if_t<((SourceChanCount == 2) && (DestChanCount == 4))>> {
- public:
-  static inline float Read(const SourceSampleType* source_ptr, size_t dest_chan) {
-    return media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + dest_chan % 2));
-  }
-};
-
-// Mappers for 3-channel sources
-//
-// 3->1 mapper
-template <typename SourceSampleType, size_t SourceChanCount, size_t DestChanCount>
-class SourceReader<SourceSampleType, SourceChanCount, DestChanCount,
-                   typename std::enable_if_t<((SourceChanCount == 3) && (DestChanCount == 1))>> {
- public:
-  // This simple 3:1 channel mapping assumes an equal weighting of the 3 source channels.
-  // Each dest frame's single value is essentially the average of the 3 source chans.
-  static inline float Read(const SourceSampleType* source_ptr, size_t dest_chan) {
-    return (media_audio::SampleConverter<SourceSampleType>::ToFloat(*source_ptr) +
-            media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + 1)) +
-            media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + 2))) /
-           3.0f;
-  }
-};
-
-// 3->2 mapper
-constexpr auto kOnePlusRootHalf = static_cast<float>(M_SQRT1_2 + 1.0);
-// 1.70710678118654752
-constexpr auto kInverseOnePlusRootHalf = static_cast<float>(1.0 / (M_SQRT1_2 + 1.0));
-// 0.58578643762690495
-constexpr auto kInverseRootTwoPlusOne = static_cast<float>(1.0 / (M_SQRT2 + 1.0));
-
-template <typename SourceSampleType, size_t SourceChanCount, size_t DestChanCount>
-class SourceReader<SourceSampleType, SourceChanCount, DestChanCount,
-                   typename std::enable_if_t<((SourceChanCount == 3) && (DestChanCount == 2))>> {
- public:
-  // This simple 3:2 channel mapping assumes a "LRC" configuration for the 3 source channels. Thus
-  // in each 3-chan source frame and 2-chan dest frame, we mix source chans 0+2 to dest chan 0, and
-  // source chans 1+2 to dest chan 1. Because we mix it equally into two dest channels, we multiply
-  // source chan2 by sqr(.5) to maintain an equal-power contribution compared to source chans 0&1.
-  // Finally, normalize both dest chans (divide by max possible value) to keep result within bounds:
-  // "divide by 1+sqr(0.5)" is optimized to "multiply by kInverseOnePlusRootHalf".
-  static inline float Read(const SourceSampleType* source_ptr, size_t dest_chan) {
-    return kInverseOnePlusRootHalf *
-               media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + dest_chan)) +
-           kInverseRootTwoPlusOne *
-               media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + 2));
-  }
-};
-
-// Mappers for 4-channel sources
-//
-// 4->1 mapper
-template <typename SourceSampleType, size_t SourceChanCount, size_t DestChanCount>
-class SourceReader<SourceSampleType, SourceChanCount, DestChanCount,
-                   typename std::enable_if_t<((SourceChanCount == 4) && (DestChanCount == 1))>> {
- public:
-  // This simple 4:1 channel mapping averages the incoming 4 source channels to determine the value
-  // for the lone destination channel.
-  static inline float Read(const SourceSampleType* source_ptr, size_t dest_chan) {
-    if constexpr (kResampler4ChannelWorkaround) {
-      // As a temporary measure, ignore channels 2 and 3.
-      // TODO(fxbug.dev/85201): Remove this workaround, once the device properly maps channels.
-      return 0.5f * (media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + 0)) +
-                     media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + 1)));
-    } else {
-      return 0.25f * (media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + 0)) +
-                      media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + 1)) +
-                      media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + 2)) +
-                      media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + 3)));
-    }
-  }
-};
-
-// 4->2 mapper
-template <typename SourceSampleType, size_t SourceChanCount, size_t DestChanCount>
-class SourceReader<SourceSampleType, SourceChanCount, DestChanCount,
-                   typename std::enable_if_t<((SourceChanCount == 4) && (DestChanCount == 2))>> {
- public:
-  // This simple 4:2 channel mapping assumes a "LRLR" configuration for the 4 source channels (e.g.
-  // a "four corners" Quad config: FrontL|FrontR|BackL|BackR). Thus in each 4-chan source frame and
-  // 2-chan dest frame, we mix source chans 0+2 to dest chan 0, and source chans 1+3 to dest chan 1.
-  static inline float Read(const SourceSampleType* source_ptr, size_t dest_chan) {
-    if constexpr (kResampler4ChannelWorkaround) {
-      // As a temporary measure, ignore channels 2 and 3.
-      // TODO(fxbug.dev/85201): Remove this workaround, once the device properly maps channels.
-      return media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + dest_chan));
-    } else {
-      return 0.5f *
-             (media_audio::SampleConverter<SourceSampleType>::ToFloat(*(source_ptr + dest_chan)) +
-              media_audio::SampleConverter<SourceSampleType>::ToFloat(
-                  *(source_ptr + dest_chan + 2)));
-    }
-  }
-};
-
-//
 // Interpolation variants
 //
 // Fixed::Format::FractionalBits is 13 for Fixed types, so max alpha of "1.0" is 0x00002000.
@@ -251,7 +88,6 @@
   }
 };
 
-}  // namespace mixer
-}  // namespace media::audio
+}  // namespace media::audio::mixer
 
 #endif  // SRC_MEDIA_AUDIO_AUDIO_CORE_MIXER_MIXER_UTILS_H_
diff --git a/src/media/audio/audio_core/mixer/mixer_utils_unittest.cc b/src/media/audio/audio_core/mixer/mixer_utils_unittest.cc
index 1b78647..633cebe 100644
--- a/src/media/audio/audio_core/mixer/mixer_utils_unittest.cc
+++ b/src/media/audio/audio_core/mixer/mixer_utils_unittest.cc
@@ -67,218 +67,6 @@
 }
 
 //
-// SourceReader tests all use float, as type conversion is handled by SamplerNormalizer
-//
-// Validate N->N channel mapping, including higher channel counts.
-// Expectation: each source channel maps identically to that destination channel.
-TEST(SourceReaderTest, Map_N_N) {
-  const float data[] = {-1.0, 1.0, 0.0, 0.5};
-
-  EXPECT_EQ((mixer::SourceReader<float, 1, 1>::Read(data, 0)), data[0]);
-  EXPECT_EQ((mixer::SourceReader<float, 1, 1>::Read(data + 3, 0)), data[3]);
-
-  EXPECT_EQ((mixer::SourceReader<float, 2, 2>::Read(data, 0)), data[0]);
-  EXPECT_EQ((mixer::SourceReader<float, 2, 2>::Read(data, 1)), data[1]);
-  EXPECT_EQ((mixer::SourceReader<float, 2, 2>::Read(data + 2, 0)), data[2]);
-  EXPECT_EQ((mixer::SourceReader<float, 2, 2>::Read(data + 2, 1)), data[3]);
-
-  EXPECT_EQ((mixer::SourceReader<float, 3, 3>::Read(data, 0)), data[0]);
-  EXPECT_EQ((mixer::SourceReader<float, 3, 3>::Read(data, 1)), data[1]);
-  EXPECT_EQ((mixer::SourceReader<float, 3, 3>::Read(data + 2, 0)), data[2]);
-  EXPECT_EQ((mixer::SourceReader<float, 3, 3>::Read(data + 1, 2)), data[3]);
-
-  EXPECT_EQ((mixer::SourceReader<float, 4, 4>::Read(data, 0)), data[0]);
-  EXPECT_EQ((mixer::SourceReader<float, 4, 4>::Read(data, 1)), data[1]);
-  EXPECT_EQ((mixer::SourceReader<float, 4, 4>::Read(data, 2)), data[2]);
-  EXPECT_EQ((mixer::SourceReader<float, 4, 4>::Read(data, 3)), data[3]);
-
-  EXPECT_EQ((mixer::SourceReader<float, 6, 6>::Read(data, 1)), data[1]);
-  EXPECT_EQ((mixer::SourceReader<float, 6, 6>::Read(data, 2)), data[2]);
-
-  EXPECT_EQ((mixer::SourceReader<float, 8, 8>::Read(data, 0)), data[0]);
-  EXPECT_EQ((mixer::SourceReader<float, 8, 8>::Read(data, 3)), data[3]);
-}
-
-// Validate 1->N channel mapping, including higher destination channel counts.
-// Expectation: the one source channel maps to every destination channel without attenuation.
-TEST(SourceReaderTest, Map_1_N) {
-  const float data[] = {0.76543f, 0.0};
-
-  EXPECT_EQ((mixer::SourceReader<float, 1, 1>::Read(data, 0)), *data);
-
-  EXPECT_EQ((mixer::SourceReader<float, 1, 2>::Read(data, 0)), *data);
-  EXPECT_EQ((mixer::SourceReader<float, 1, 2>::Read(data, 1)),
-            (mixer::SourceReader<float, 1, 2>::Read(data, 0)));
-
-  EXPECT_EQ((mixer::SourceReader<float, 1, 3>::Read(data, 0)), *data);
-  EXPECT_EQ((mixer::SourceReader<float, 1, 3>::Read(data, 1)),
-            (mixer::SourceReader<float, 1, 3>::Read(data, 0)));
-  EXPECT_EQ((mixer::SourceReader<float, 1, 3>::Read(data, 2)),
-            (mixer::SourceReader<float, 1, 3>::Read(data, 0)));
-
-  EXPECT_EQ((mixer::SourceReader<float, 1, 4>::Read(data, 0)), *data);
-  EXPECT_EQ((mixer::SourceReader<float, 1, 4>::Read(data, 1)),
-            (mixer::SourceReader<float, 1, 4>::Read(data, 0)));
-  EXPECT_EQ((mixer::SourceReader<float, 1, 4>::Read(data, 2)),
-            (mixer::SourceReader<float, 1, 4>::Read(data, 0)));
-  EXPECT_EQ((mixer::SourceReader<float, 1, 4>::Read(data, 3)),
-            (mixer::SourceReader<float, 1, 4>::Read(data, 0)));
-
-  EXPECT_EQ((mixer::SourceReader<float, 1, 5>::Read(data, 1)), *data);
-  EXPECT_EQ((mixer::SourceReader<float, 1, 5>::Read(data, 4)),
-            (mixer::SourceReader<float, 1, 5>::Read(data, 1)));
-
-  EXPECT_EQ((mixer::SourceReader<float, 1, 8>::Read(data, 2)), *data);
-  EXPECT_EQ((mixer::SourceReader<float, 1, 8>::Read(data, 7)),
-            (mixer::SourceReader<float, 1, 8>::Read(data, 2)));
-}
-
-// Validate 2->1 channel mapping.
-// Expectation: each source channel should contribute equally to the one destination channel.
-// The one destination channel is average of all source channels.
-TEST(SourceReaderTest, Map_2_1) {
-  using SR = mixer::SourceReader<float, 2, 1>;
-  const float data[] = {-1.0, 1.0, 0.5};
-  const float expect[] = {0, 0.75};
-
-  EXPECT_EQ(SR::Read(data, 0), expect[0]);
-  EXPECT_EQ(SR::Read(data + 1, 0), expect[1]);
-}
-
-// Validate 2->3 channel mapping.
-// Expectation: 3-channel destination is L.R.C (or some other geometry where third destination
-// channel should contain an equal mix of the two source channels).
-// dest[0] is source[0]; dest[1] is source[1]; dest[2] is average of source[0] and source[1].
-TEST(SourceReaderTest, Map_2_3) {
-  using SR = mixer::SourceReader<float, 2, 3>;
-  const float data[] = {-1.0, 1.0, 0.5};
-  const float expect_chan2[] = {0.0, 0.75};
-
-  EXPECT_EQ(SR::Read(data, 0), data[0]);
-  EXPECT_EQ(SR::Read(data, 1), data[1]);
-  EXPECT_EQ(SR::Read(data, 2), expect_chan2[0]);
-
-  EXPECT_EQ(SR::Read(data + 1, 0), data[1]);
-  EXPECT_EQ(SR::Read(data + 1, 1), data[2]);
-  EXPECT_EQ(SR::Read(data + 1, 2), expect_chan2[1]);
-}
-
-// Validate 2->4 channel mapping.
-// Expectation: 4-chan destination is "4 corners" FL.FR.BL.BR (or other L.R.L.R geometry).
-// We map each source channel equally to the two destination channels on each side.
-TEST(SourceReaderTest, Map_2_4) {
-  using SR = mixer::SourceReader<float, 2, 4>;
-  const float data[] = {-1.0, 1.0, 0.5};
-
-  EXPECT_EQ(SR::Read(data, 0), data[0]);
-  EXPECT_EQ(SR::Read(data, 1), data[1]);
-  EXPECT_EQ(SR::Read(data, 2), SR::Read(data, 0));
-  EXPECT_EQ(SR::Read(data, 3), SR::Read(data, 1));
-
-  EXPECT_EQ(SR::Read(data + 1, 0), data[1]);
-  EXPECT_EQ(SR::Read(data + 1, 1), data[2]);
-  EXPECT_EQ(SR::Read(data + 1, 2), SR::Read(data + 1, 0));
-  EXPECT_EQ(SR::Read(data + 1, 3), SR::Read(data + 1, 1));
-}
-
-// Validate 3->1 channel mapping.
-// Expectation: each source channel should contribute equally to the one destination channel.
-// The one destination channel is average of all source channels.
-TEST(SourceReaderTest, Map_3_1) {
-  using SR = mixer::SourceReader<float, 3, 1>;
-  const float data[] = {-0.5f, 1.0f, 1.0f, -0.8f};
-  const float expect[] = {0.5f, 0.4f};
-
-  EXPECT_EQ(SR::Read(data, 0), expect[0]);
-  EXPECT_EQ(SR::Read(data, 1), SR::Read(data, 0));
-  EXPECT_EQ(SR::Read(data, 2), SR::Read(data, 0));
-
-  EXPECT_EQ(SR::Read(data + 1, 0), expect[1]);
-  EXPECT_EQ(SR::Read(data + 1, 1), SR::Read(data + 1, 0));
-  EXPECT_EQ(SR::Read(data + 1, 2), SR::Read(data + 1, 0));
-}
-
-// Validate 3->2 channel mapping.
-// Expectation: 3-channel source is L.R.C (or some other geometry where third source channel should
-// be distributed evenly into both destination channels).
-//
-// Conceptually, dest[0] becomes source[0] + source[2]/2; dest[1] becomes source[1] + source[2]/2.
-// However when contributing source[2] to two destinations, we must conserve the POWER of
-// source[2] relative to the other source channels -- we add sqr(0.5)*source[2] (not 0.5*source[2])
-// to each side -- and then normalize the result to eliminate clipping.
-//
-//   dest[0] = (0.585786... * source[0]) + (0.414213... * source[2])
-//   dest[1] = (0.585786... * source[1]) + (0.414213... * source[2])
-TEST(SourceReaderTest, Map_3_2) {
-  using SR = mixer::SourceReader<float, 3, 2>;
-  const float data[] = {1, -0.5, -0.5, -1};
-  const float expect[] = {0.378679656f, -0.5f, -0.70710678f};
-
-  EXPECT_FLOAT_EQ(SR::Read(data, 0), expect[0]);
-  EXPECT_FLOAT_EQ(SR::Read(data, 1), expect[1]);
-
-  EXPECT_FLOAT_EQ(SR::Read(data + 1, 0), expect[2]);
-  EXPECT_EQ(SR::Read(data + 1, 1), SR::Read(data + 1, 0));
-}
-
-// No built-in 3->4 mapping is provided
-
-// Validate 4->1 channel mapping.
-// Expectation: each source channel should contribute equally to the one destination channel.
-// The one destination channel is average of all source channels.
-TEST(SourceReaderTest, Map_4_1) {
-  using SR = mixer::SourceReader<float, 4, 1>;
-  const float data[] = {-0.25f, 0.75f, 1.0f, -0.5f, -0.05f};
-
-  std::array<float, 2> expect;
-  if constexpr (kResampler4ChannelWorkaround) {
-    // For now, the 4->1 mapper will just ignore channels 2 and 3.
-    // TODO(fxbug.dev/85201): Remove this workaround, once the device properly maps channels.
-    expect = {0.25f, 0.875f};
-  } else {
-    expect = {0.25f, 0.3f};
-  }
-
-  EXPECT_EQ(SR::Read(data, 0), expect[0]);
-  EXPECT_EQ(SR::Read(data, 1), SR::Read(data, 0));
-  EXPECT_EQ(SR::Read(data, 2), SR::Read(data, 0));
-  EXPECT_EQ(SR::Read(data, 3), SR::Read(data, 0));
-
-  EXPECT_EQ(SR::Read(data + 1, 0), expect[1]);
-  EXPECT_EQ(SR::Read(data + 1, 1), SR::Read(data + 1, 0));
-  EXPECT_EQ(SR::Read(data + 1, 2), SR::Read(data + 1, 0));
-  EXPECT_EQ(SR::Read(data + 1, 3), SR::Read(data + 1, 0));
-}
-
-// Validate 4->2 channel mapping.
-// Expectation: 4-chan source is "4 corners" FL.FR.BL.BR (or other L.R.L.R geometry).
-// We assign equal weight to the source channels on each side.
-// dest[0] is average of source[0] and [2]; dest[1] is average of source[1] and [3].
-TEST(SourceReaderTest, Map_4_2) {
-  using SR = mixer::SourceReader<float, 4, 2>;
-  const float data[] = {-0.25, 0.75, 1.0, -0.5, 0.0};
-
-  std::array<float, 3> expect;
-  if constexpr (kResampler4ChannelWorkaround) {
-    // For now, the 4->2 mapper will just ignore channels 2 and 3.
-    // TODO(fxbug.dev/85201): Remove this workaround, once the device properly maps channels.
-    expect = {-0.25, 0.75, 1.0};
-  } else {
-    expect = {0.375, 0.125, 0.5};
-  }
-
-  EXPECT_EQ(SR::Read(data, 0), expect[0]);
-  EXPECT_EQ(SR::Read(data, 1), expect[1]);
-  EXPECT_EQ(SR::Read(data + 1, 0), expect[1]);
-  EXPECT_EQ(SR::Read(data + 1, 1), expect[2]);
-}
-
-// No built-in 4->3 mapping is provided
-
-// No built-in mappings are provided for configs with source channels or dest channels above 4
-// (other than the "pass-thru" "N->N and "unity" 1->N mappings).
-
-//
 // DestMixer tests focus primarily on accumulate functionality, since DestMixer internally uses
 // SampleScaler which is validated above.
 //
diff --git a/src/media/audio/audio_core/mixer/point_sampler_unittest.cc b/src/media/audio/audio_core/mixer/point_sampler_unittest.cc
index 0dc97e6..6dc92cb 100644
--- a/src/media/audio/audio_core/mixer/point_sampler_unittest.cc
+++ b/src/media/audio/audio_core/mixer/point_sampler_unittest.cc
@@ -8,6 +8,7 @@
 #include <gtest/gtest.h>
 
 #include "src/media/audio/audio_core/mixer/mixer_utils.h"
+#include "src/media/audio/lib/format2/channel_mapper.h"
 #include "src/media/audio/lib/format2/sample_converter.h"
 #include "src/media/audio/lib/processing/gain.h"
 
@@ -395,7 +396,7 @@
   // Express expected values as "int24" (not int32) to clearly show fractional and min/max values.
   auto accum = std::vector<float>(source.size() / 4);
   std::vector<float> expect;
-  if constexpr (kResampler4ChannelWorkaround) {
+  if constexpr (media_audio::kEnable4ChannelWorkaround) {
     // For now, 4->1 just ignores channels 2 & 3.
     // TODO(fxbug.dev/85201): Remove this workaround, once the device properly maps channels.
     expect = {
@@ -445,7 +446,7 @@
 
   // Express expected values as "int24" (not int32) to clearly show fractional and min/max values.
   std::vector<float> expect;
-  if constexpr (kResampler4ChannelWorkaround) {
+  if constexpr (media_audio::kEnable4ChannelWorkaround) {
     // For now, 4->2 just ignores channels 2 & 3.
     // TODO(fxbug.dev/85201): Remove this workaround, once the device properly maps channels.
     expect = {1, -1, -0x800000, 0x7FFFFF, 0x7FFFFF, 0};
diff --git a/src/media/audio/lib/format2/BUILD.gn b/src/media/audio/lib/format2/BUILD.gn
index b6e99c0..a17b384 100644
--- a/src/media/audio/lib/format2/BUILD.gn
+++ b/src/media/audio/lib/format2/BUILD.gn
@@ -11,6 +11,7 @@
 
 source_set("format2") {
   sources = [
+    "channel_mapper.h",
     "format.cc",
     "format.h",
     "sample_converter.h",
@@ -34,6 +35,7 @@
   output_name = "audio-libformat2-unittests"
 
   sources = [
+    "channel_mapper_unittest.cc",
     "format_unittest.cc",
     "sample_converter_unittest.cc",
   ]
diff --git a/src/media/audio/lib/format2/channel_mapper.h b/src/media/audio/lib/format2/channel_mapper.h
new file mode 100644
index 0000000..b77e339
--- /dev/null
+++ b/src/media/audio/lib/format2/channel_mapper.h
@@ -0,0 +1,176 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SRC_MEDIA_AUDIO_LIB_FORMAT_2_CHANNEL_MAPPER_H_
+#define SRC_MEDIA_AUDIO_LIB_FORMAT_2_CHANNEL_MAPPER_H_
+
+#include <array>
+#include <cmath>
+#include <type_traits>
+#include <utility>
+
+#include "src/media/audio/lib/format2/sample_converter.h"
+
+namespace media_audio {
+
+// TODO(fxbug.dev/85201): Remove this workaround, once the device properly maps channels.
+inline constexpr bool kEnable4ChannelWorkaround = true;
+
+// Template to map an input frame of `InputSampleType` with `InputChannelCount` into each output
+// sample with `OutputChannelCount` in a normalized 32-bit float format.
+template <typename InputSampleType, size_t InputChannelCount, size_t OutputChannelCount,
+          bool Customizable = false, typename Trait = void>
+class ChannelMapper;
+
+// N -> N channel mapper (passthrough).
+template <typename InputSampleType, size_t InputChannelCount, size_t OutputChannelCount>
+class ChannelMapper<InputSampleType, InputChannelCount, OutputChannelCount, /*Customizable*/ false,
+                    typename std::enable_if_t<(InputChannelCount == OutputChannelCount)>> {
+ public:
+  inline float Map(const InputSampleType* input, size_t output_channel) {
+    return SampleConverter<InputSampleType>::ToFloat(input[output_channel]);
+  }
+};
+
+// 1 -> N channel mapper.
+template <typename InputSampleType, size_t InputChannelCount, size_t OutputChannelCount>
+class ChannelMapper<InputSampleType, InputChannelCount, OutputChannelCount, /*Customizable*/ false,
+                    typename std::enable_if_t<(InputChannelCount == 1 && OutputChannelCount > 1)>> {
+ public:
+  inline float Map(const InputSampleType* input, [[maybe_unused]] size_t output_channel) {
+    return SampleConverter<InputSampleType>::ToFloat(input[0]);
+  }
+};
+
+// 2 -> 1 channel mapper.
+template <typename InputSampleType, size_t InputChannelCount, size_t OutputChannelCount>
+class ChannelMapper<
+    InputSampleType, InputChannelCount, OutputChannelCount, /*Customizable*/ false,
+    typename std::enable_if_t<(InputChannelCount == 2 && OutputChannelCount == 1)>> {
+ public:
+  inline float Map(const InputSampleType* input, [[maybe_unused]] size_t output_channel) {
+    // Assumes a configuration with equal weighting of each channel.
+    return 0.5f * (SampleConverter<InputSampleType>::ToFloat(input[0]) +
+                   SampleConverter<InputSampleType>::ToFloat(input[1]));
+  }
+};
+
+// 2 -> 3 channel mapper.
+template <typename InputSampleType, size_t InputChannelCount, size_t OutputChannelCount>
+class ChannelMapper<
+    InputSampleType, InputChannelCount, OutputChannelCount, /*Customizable*/ false,
+    typename std::enable_if_t<(InputChannelCount == 2 && OutputChannelCount == 3)>> {
+ public:
+  inline float Map(const InputSampleType* input, size_t output_channel) {
+    // Assumes a configuration where the third channel is an equally weighted downmix of the first
+    // two channels.
+    return (output_channel < 2) ? SampleConverter<InputSampleType>::ToFloat(input[output_channel])
+                                : 0.5f * (SampleConverter<InputSampleType>::ToFloat(input[0]) +
+                                          SampleConverter<InputSampleType>::ToFloat(input[1]));
+  }
+};
+
+// 2 -> 4 channel mapper.
+template <typename InputSampleType, size_t InputChannelCount, size_t OutputChannelCount>
+class ChannelMapper<
+    InputSampleType, InputChannelCount, OutputChannelCount, /*Customizable*/ false,
+    typename std::enable_if_t<(InputChannelCount == 2 && OutputChannelCount == 4)>> {
+ public:
+  inline float Map(const InputSampleType* input, size_t output_channel) {
+    return SampleConverter<InputSampleType>::ToFloat(input[output_channel % 2]);
+  }
+};
+
+// 3 -> 1 channel mapper.
+template <typename InputSampleType, size_t InputChannelCount, size_t OutputChannelCount>
+class ChannelMapper<
+    InputSampleType, InputChannelCount, OutputChannelCount, /*Customizable*/ false,
+    typename std::enable_if_t<(InputChannelCount == 3 && OutputChannelCount == 1)>> {
+ public:
+  // Assumes a configuration with equal weighting of each channel.
+  inline float Map(const InputSampleType* input, [[maybe_unused]] size_t output_channel) {
+    return (SampleConverter<InputSampleType>::ToFloat(input[0]) +
+            SampleConverter<InputSampleType>::ToFloat(input[1]) +
+            SampleConverter<InputSampleType>::ToFloat(input[2])) /
+           3.0f;
+  }
+};
+
+// 3 -> 2 channel mapper.
+template <typename InputSampleType, size_t InputChannelCount, size_t OutputChannelCount>
+class ChannelMapper<
+    InputSampleType, InputChannelCount, OutputChannelCount, /*Customizable*/ false,
+    typename std::enable_if_t<(InputChannelCount == 3 && OutputChannelCount == 2)>> {
+ public:
+  inline float Map(const InputSampleType* input, size_t output_channel) {
+    return SampleConverter<InputSampleType>::ToFloat(input[output_channel]) *
+               kInverseOnePlusRootHalf +
+           SampleConverter<InputSampleType>::ToFloat(input[2]) * kInverseRootTwoPlusOne;
+  }
+
+ private:
+  static constexpr float kInverseOnePlusRootHalf = static_cast<float>(1.0 / (M_SQRT1_2 + 1.0));
+  static constexpr float kInverseRootTwoPlusOne = static_cast<float>(1.0 / (M_SQRT2 + 1.0));
+};
+
+// 4 -> 1 channel mapper.
+template <typename InputSampleType, size_t InputChannelCount, size_t OutputChannelCount>
+class ChannelMapper<
+    InputSampleType, InputChannelCount, OutputChannelCount, /*Customizable*/ false,
+    typename std::enable_if_t<(InputChannelCount == 4 && OutputChannelCount == 1)>> {
+ public:
+  inline float Map(const InputSampleType* input, [[maybe_unused]] size_t output_channel) {
+    if constexpr (kEnable4ChannelWorkaround) {
+      // TODO(fxbug.dev/85201): Temporarily ignore the third and fourth channels.
+      return 0.5f * (SampleConverter<InputSampleType>::ToFloat(input[0]) +
+                     SampleConverter<InputSampleType>::ToFloat(input[1]));
+    }
+    return 0.25f * (SampleConverter<InputSampleType>::ToFloat(input[0]) +
+                    SampleConverter<InputSampleType>::ToFloat(input[1]) +
+                    SampleConverter<InputSampleType>::ToFloat(input[2]) +
+                    SampleConverter<InputSampleType>::ToFloat(input[3]));
+  }
+};
+
+// 4 -> 2 channel mapper.
+template <typename InputSampleType, size_t InputChannelCount, size_t OutputChannelCount>
+class ChannelMapper<
+    InputSampleType, InputChannelCount, OutputChannelCount, /*Customizable*/ false,
+    typename std::enable_if_t<(InputChannelCount == 4 && OutputChannelCount == 2)>> {
+ public:
+  inline float Map(const InputSampleType* input, size_t output_channel) {
+    if constexpr (kEnable4ChannelWorkaround) {
+      // TODO(fxbug.dev/85201): Temporarily ignore the third and fourth channels.
+      return SampleConverter<InputSampleType>::ToFloat(input[output_channel]);
+    }
+    return 0.5f * (SampleConverter<InputSampleType>::ToFloat(input[output_channel]) +
+                   SampleConverter<InputSampleType>::ToFloat(input[output_channel + 2]));
+  }
+};
+
+// M -> N customizable channel mapper.
+template <typename InputSampleType, size_t InputChannelCount, size_t OutputChannelCount>
+class ChannelMapper<InputSampleType, InputChannelCount, OutputChannelCount, /*Customizable=*/true> {
+ public:
+  explicit ChannelMapper(
+      std::array<std::array<float, InputChannelCount>, OutputChannelCount> coefficients)
+      : coefficients_(std::move(coefficients)) {}
+
+  inline float Map(const InputSampleType* input, size_t output_channel) {
+    float output = 0.0f;
+    for (size_t input_channel = 0; input_channel < InputChannelCount; ++input_channel) {
+      output += coefficients_[output_channel][input_channel] *
+                SampleConverter<InputSampleType>::ToFloat(input[input_channel]);
+    }
+    return output;
+  }
+
+ private:
+  // Normalized channel coefficients.
+  std::array<std::array<float, InputChannelCount>, OutputChannelCount> coefficients_;
+};
+
+}  // namespace media_audio
+
+#endif  //  SRC_MEDIA_AUDIO_LIB_FORMAT_2_CHANNEL_MAPPER_H_
diff --git a/src/media/audio/lib/format2/channel_mapper_unittest.cc b/src/media/audio/lib/format2/channel_mapper_unittest.cc
new file mode 100644
index 0000000..207cf46
--- /dev/null
+++ b/src/media/audio/lib/format2/channel_mapper_unittest.cc
@@ -0,0 +1,169 @@
+// Copyright 2022 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/media/audio/lib/format2/channel_mapper.h"
+
+#include <cstdint>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include "src/media/audio/lib/format2/sample_converter.h"
+
+namespace media_audio {
+namespace {
+
+TEST(ChannelMapperTest, SameChannels) {
+  ChannelMapper<int16_t, 4, 4> mapper;
+
+  const std::vector<int16_t> input = {-0x4000, kMinInt16, 0, 0x4000};
+  const std::vector<float> expected = {-0.5f, -1.0f, 0.0f, 0.5f};
+  for (size_t channel = 0; channel < expected.size(); ++channel) {
+    EXPECT_FLOAT_EQ(mapper.Map(input.data(), channel), expected[channel]);
+  }
+}
+
+TEST(ChannelMapperTest, MonoToStereo) {
+  ChannelMapper<int16_t, 1, 2> mapper;
+
+  const int16_t input = 0x4000;  // 0.5f
+  for (size_t channel = 0; channel < 2; ++channel) {
+    EXPECT_FLOAT_EQ(mapper.Map(&input, 0), 0.5f);
+  }
+}
+
+TEST(ChannelMapperTest, MonoToThreeChannels) {
+  ChannelMapper<uint8_t, 1, 3> mapper;
+
+  const uint8_t input = 0x40;  // -0.5f
+  for (size_t channel = 0; channel < 3; ++channel) {
+    EXPECT_FLOAT_EQ(mapper.Map(&input, 0), -0.5f);
+  }
+}
+
+TEST(ChannelMapperTest, MonoToFourChannels) {
+  ChannelMapper<float, 1, 4> mapper;
+
+  const float input = 0.2f;
+  for (size_t channel = 0; channel < 4; ++channel) {
+    EXPECT_FLOAT_EQ(mapper.Map(&input, 0), 0.2f);
+  }
+}
+
+TEST(ChannelMapperTest, StereoToMono) {
+  ChannelMapper<int16_t, 2, 1> mapper;
+
+  const std::vector<int16_t> input = {-0x2000, -0x4000};  // {-0.25f, -0.5f}
+  EXPECT_FLOAT_EQ(mapper.Map(input.data(), 0), -0.375f);
+}
+
+TEST(ChannelMapperTest, StereoToThreeChannels) {
+  ChannelMapper<float, 2, 3> mapper;
+
+  const std::vector<float> input = {-0.25f, 0.75f};
+  const std::vector<float> expected = {-0.25f, 0.75f, 0.25f};
+  for (size_t channel = 0; channel < expected.size(); ++channel) {
+    EXPECT_FLOAT_EQ(mapper.Map(input.data(), channel), expected[channel]);
+  }
+}
+
+TEST(ChannelMapperTest, StereoToFourChannels) {
+  ChannelMapper<float, 2, 4> mapper;
+
+  const std::vector<float> input = {-0.25f, 0.75f};
+  const std::vector<float> expected = {-0.25f, 0.75f, -0.25f, 0.75f};
+  for (size_t channel = 0; channel < expected.size(); ++channel) {
+    EXPECT_FLOAT_EQ(mapper.Map(input.data(), channel), expected[channel]);
+  }
+}
+
+TEST(ChannelMapperTest, ThreeChannelsToMono) {
+  ChannelMapper<float, 3, 1> mapper;
+
+  const std::vector<float> input = {-1.0f, 0.5f, -0.1f};
+  EXPECT_FLOAT_EQ(mapper.Map(input.data(), 0), -0.2f);
+}
+
+TEST(ChannelMapperTest, ThreeChannelsToStereo) {
+  ChannelMapper<float, 3, 2> mapper;
+
+  const std::vector<float> input = {1.0f, -0.5f, -0.5f};
+  const std::vector<float> expected = {0.378679656f, -0.5f};
+  for (size_t channel = 0; channel < expected.size(); ++channel) {
+    EXPECT_FLOAT_EQ(mapper.Map(input.data(), channel), expected[channel]);
+  }
+}
+
+TEST(ChannelMapperTest, FourChannelsToMono) {
+  ChannelMapper<float, 4, 1> mapper;
+
+  const std::vector<float> input = {1.0f, 2.0f, 3.0f, 4.0f};
+  if constexpr (kEnable4ChannelWorkaround) {
+    EXPECT_FLOAT_EQ(mapper.Map(input.data(), 0), 1.5f);
+  } else {
+    EXPECT_FLOAT_EQ(mapper.Map(input.data(), 0), 5.0f);
+  }
+}
+
+TEST(ChannelMapperTest, FourChannelsToStereo) {
+  ChannelMapper<float, 4, 2> mapper;
+
+  const std::vector<float> input = {1.0f, 2.0f, 3.0f, 4.0f};
+  if constexpr (kEnable4ChannelWorkaround) {
+    EXPECT_FLOAT_EQ(mapper.Map(input.data(), 0), 1.0f);
+    EXPECT_FLOAT_EQ(mapper.Map(input.data(), 1), 2.0f);
+  } else {
+    EXPECT_FLOAT_EQ(mapper.Map(input.data(), 0), 1.5f);
+    EXPECT_FLOAT_EQ(mapper.Map(input.data(), 1), 3.0f);
+  }
+}
+
+TEST(ChannelMapperTest, CustomizableSameChannels) {
+  ChannelMapper<float, 2, 2, /*Customizable=*/true> mapper({{
+      {-1.0f, 0.0f},
+      {0.5f, 0.5f},
+  }});
+
+  const std::vector<float> input = {2.0f, 3.0f};
+  const std::vector<float> expected = {-2.0f, 2.5f};
+  for (size_t channel = 0; channel < expected.size(); ++channel) {
+    EXPECT_FLOAT_EQ(mapper.Map(input.data(), channel), expected[channel]);
+  }
+}
+
+TEST(ChannelMapperTest, CustomizableSingleToMulti) {
+  ChannelMapper<float, 1, 3, /*Customizable=*/true> mapper({{{1.0f}, {-2.0f}, {3.0f}}});
+
+  const std::vector<float> input = {0.5f};
+  const std::vector<float> expected = {0.5f, -1.0f, 1.5f};
+  for (size_t channel = 0; channel < expected.size(); ++channel) {
+    EXPECT_FLOAT_EQ(mapper.Map(input.data(), channel), expected[channel]);
+  }
+}
+
+TEST(ChannelMapperTest, CustomizableMultiToSingle) {
+  ChannelMapper<float, 2, 1, /*Customizable=*/true> mapper({{{1.0f, 0.25f}}});
+
+  const std::vector<float> input = {2.0f, 4.0f};
+  EXPECT_FLOAT_EQ(mapper.Map(input.data(), 0), 3.0f);
+}
+
+TEST(ChannelMapperTest, CustomizableMultiToMulti) {
+  ChannelMapper<float, 3, 5, /*Customizable=*/true> mapper({{
+      {1.0f, 0.0f, 0.0f},
+      {0.0f, 1.0f, 0.0f},
+      {0.0f, 0.0f, 1.0f},
+      {1.0f, 1.0f, 1.0f},
+      {-1.0f, 2.0f, -3.0f},
+  }});
+
+  const std::vector<float> input = {1.0f, 2.0f, 3.0f};
+  const std::vector<float> expected = {1.0f, 2.0f, 3.0f, 6.0f, -6.0f};
+  for (size_t channel = 0; channel < expected.size(); ++channel) {
+    EXPECT_FLOAT_EQ(mapper.Map(input.data(), channel), expected[channel]);
+  }
+}
+
+}  // namespace
+}  // namespace media_audio
diff --git a/src/media/audio/lib/format2/sample_converter.h b/src/media/audio/lib/format2/sample_converter.h
index 6ee5a80..c3d3cb4 100644
--- a/src/media/audio/lib/format2/sample_converter.h
+++ b/src/media/audio/lib/format2/sample_converter.h
@@ -35,7 +35,7 @@
 inline constexpr double kInt24ToFloat = 1.0 / kFloatToInt24;
 inline constexpr int64_t kFloatToInt24In32 = -static_cast<int64_t>(kMinInt24In32);
 
-// Template to convert a sample s `SampleType` from/to a normalized 32-bit float format.
+// Template to convert a sample of `SampleType` from/to a normalized 32-bit float format.
 template <typename SampleType, typename Trait = void>
 struct SampleConverter;
 
diff --git a/src/security/pkg_test/fshost/BUILD.gn b/src/security/pkg_test/fshost/BUILD.gn
index d1f5471..966458c 100644
--- a/src/security/pkg_test/fshost/BUILD.gn
+++ b/src/security/pkg_test/fshost/BUILD.gn
@@ -14,7 +14,6 @@
   sources = [
     "src/fshost.rs",
     "src/main.rs",
-    "src/pkgfs.rs",
     "src/storage.rs",
   ]
   deps = [
@@ -28,7 +27,6 @@
     "//src/lib/fuchsia-async",
     "//src/lib/fuchsia-component",
     "//src/lib/fuchsia-runtime",
-    "//src/lib/scoped_task",
     "//src/lib/storage/fs_management/rust:fs_management",
     "//src/lib/storage/ramdevice_client",
     "//src/lib/storage/vfs/rust:vfs",
@@ -36,8 +34,6 @@
     "//src/lib/zircon/rust:fuchsia-zircon",
     "//src/storage/bin/blobfs",
     "//src/storage/testing/rust:storage-isolated-driver-manager",
-    "//src/sys/pkg/bin/pkgfs:pkgsvr",
-    "//src/sys/pkg/lib/fuchsia-merkle",
     "//third_party/rust_crates:argh",
     "//third_party/rust_crates:log",
 
diff --git a/src/security/pkg_test/fshost/meta/fshost.shard.cml b/src/security/pkg_test/fshost/meta/fshost.shard.cml
index a052b89..a2f57eb 100644
--- a/src/security/pkg_test/fshost/meta/fshost.shard.cml
+++ b/src/security/pkg_test/fshost/meta/fshost.shard.cml
@@ -13,7 +13,6 @@
 
         // Note: Manifests including this shard must provide args:
         //   --fvm-block-file-path
-        //   --system-image-path
     },
     children: [
         {
@@ -23,17 +22,12 @@
     ],
     capabilities: [
         {
-            directory: "blob",
-            rights: [ "rw*" ],
-            path: "/blob",
-        },
-        {
-            directory: "pkgfs",
+            directory: "blob-exec",
             rights: [
                 "execute",
                 "rw*",
             ],
-            path: "/pkgfs",
+            path: "/blob",
         },
     ],
     use: [
@@ -55,10 +49,7 @@
     ],
     expose: [
         {
-            directory: [
-                "blob",
-                "pkgfs",
-            ],
+            directory: "blob-exec",
             from: "self",
         },
         {
diff --git a/src/security/pkg_test/fshost/src/fshost.rs b/src/security/pkg_test/fshost/src/fshost.rs
index 7399ff8..0a6292f 100644
--- a/src/security/pkg_test/fshost/src/fshost.rs
+++ b/src/security/pkg_test/fshost/src/fshost.rs
@@ -3,13 +3,11 @@
 // found in the LICENSE file.
 
 use {
-    crate::{pkgfs::PkgfsInstance, storage::BlobfsInstance},
+    crate::storage::BlobfsInstance,
     fidl_fuchsia_io as fio,
-    fuchsia_merkle::MerkleTree,
     fuchsia_runtime::{take_startup_handle, HandleType},
     fuchsia_syslog::fx_log_info,
-    io_util::directory::open_in_namespace,
-    std::{fs::File, ops::Drop},
+    std::ops::Drop,
     vfs::{
         directory::entry::DirectoryEntry, execution_scope::ExecutionScope, path::Path,
         pseudo_directory, remote::remote_dir,
@@ -18,29 +16,16 @@
 
 pub struct FSHost {
     blobfs: BlobfsInstance,
-    pkgfs: Option<PkgfsInstance>,
 }
 
 impl FSHost {
-    pub async fn new(fvm_path: &str, blobfs_mountpoint: &str, system_image_path: &str) -> Self {
+    pub async fn new(fvm_path: &str, blobfs_mountpoint: &str) -> Self {
         fx_log_info!("Starting blobfs from FVM image at {}", fvm_path);
         let mut blobfs = BlobfsInstance::new_from_resource(fvm_path).await;
         fx_log_info!("Mounting blobfs at {}", blobfs_mountpoint);
         blobfs.mount(blobfs_mountpoint);
-        let blobfs_dir = open_in_namespace(
-            blobfs_mountpoint,
-            fio::OpenFlags::RIGHT_READABLE
-                | fio::OpenFlags::RIGHT_WRITABLE
-                | fio::OpenFlags::RIGHT_EXECUTABLE,
-        )
-        .unwrap();
-        let mut system_image_file = File::open(system_image_path).unwrap();
-        let system_image_merkle = MerkleTree::from_reader(&mut system_image_file).unwrap().root();
 
-        fx_log_info!("Starting pkgfs with system image merkle {}", system_image_merkle.to_string());
-        let pkgfs = PkgfsInstance::new(blobfs_dir, system_image_merkle);
-
-        Self { blobfs, pkgfs: Some(pkgfs) }
+        Self { blobfs }
     }
 
     pub async fn serve(&self) {
@@ -48,7 +33,6 @@
 
         let out_dir = pseudo_directory! {
             "blob" => remote_dir(self.blobfs.open_root_dir()),
-            "pkgfs" => remote_dir(self.pkgfs.as_ref().unwrap().proxy()),
         };
         let scope = ExecutionScope::new();
         out_dir.open(
@@ -69,8 +53,6 @@
 
 impl Drop for FSHost {
     fn drop(&mut self) {
-        // Drop pkgfs before unmounting blobfs.
-        self.pkgfs = None;
         self.blobfs.unmount();
     }
 }
diff --git a/src/security/pkg_test/fshost/src/main.rs b/src/security/pkg_test/fshost/src/main.rs
index 9e33f91..abc7ac1 100644
--- a/src/security/pkg_test/fshost/src/main.rs
+++ b/src/security/pkg_test/fshost/src/main.rs
@@ -3,7 +3,6 @@
 // found in the LICENSE file.
 
 mod fshost;
-mod pkgfs;
 mod storage;
 
 use {
@@ -19,9 +18,6 @@
     /// absolute path to fvm block file used to bootstrap blobfs.
     #[argh(option)]
     fvm_block_file_path: String,
-    /// absolute path to system image package file for bootstrapping pkgfs.
-    #[argh(option)]
-    system_image_path: String,
 }
 
 #[fuchsia_async::run_singlethreaded]
@@ -34,8 +30,5 @@
 
     fx_log_info!("Initalizing fshost with {:#?}", args);
 
-    fshost::FSHost::new(&args.fvm_block_file_path, BLOBFS_MOUNTPOINT, &args.system_image_path)
-        .await
-        .serve()
-        .await
+    fshost::FSHost::new(&args.fvm_block_file_path, BLOBFS_MOUNTPOINT).await.serve().await
 }
diff --git a/src/security/pkg_test/fshost/src/pkgfs.rs b/src/security/pkg_test/fshost/src/pkgfs.rs
deleted file mode 100644
index 389b993..0000000
--- a/src/security/pkg_test/fshost/src/pkgfs.rs
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2021 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-use {
-    fdio::{SpawnAction, SpawnOptions},
-    fidl::endpoints::{create_proxy, Proxy},
-    fidl_fuchsia_io as fio,
-    fuchsia_merkle::Hash,
-    fuchsia_runtime::{HandleInfo, HandleType},
-    fuchsia_syslog::fx_log_info,
-    scoped_task::{job_default, spawn_etc, Scoped},
-    std::ffi::{CStr, CString},
-};
-
-const PKGSVR_PATH: &str = "/pkg/bin/pkgsvr";
-
-pub struct PkgfsInstance {
-    _system_image_merkle: Hash,
-    _process: Scoped<fuchsia_zircon::Process>,
-    proxy: fio::DirectoryProxy,
-}
-
-impl PkgfsInstance {
-    /// Instantiate pkgfs in a sub-process with two inputs: the blobfs root
-    /// directory handle and the system image merkle root hash.
-    pub fn new(blobfs_root_dir: fio::DirectoryProxy, system_image_merkle: Hash) -> Self {
-        let args = vec![
-            CString::new(PKGSVR_PATH).unwrap(),
-            CString::new(system_image_merkle.to_string().as_bytes()).unwrap(),
-        ];
-        let argv = args.iter().map(AsRef::as_ref).collect::<Vec<&CStr>>();
-
-        let pkgfs_root_handle_info = HandleInfo::new(HandleType::User0, 0);
-        let (proxy, pkgfs_root_server_end) = create_proxy::<fio::DirectoryMarker>().unwrap();
-
-        fx_log_info!("Spawning pkgfs process; binary: {}", PKGSVR_PATH);
-
-        let process = spawn_etc(
-            job_default(),
-            SpawnOptions::CLONE_ALL,
-            &CString::new(PKGSVR_PATH).unwrap(),
-            &argv,
-            None,
-            &mut [
-                SpawnAction::add_handle(
-                    pkgfs_root_handle_info,
-                    pkgfs_root_server_end.into_channel().into(),
-                ),
-                SpawnAction::add_namespace_entry(
-                    &CString::new("/blob").unwrap(),
-                    blobfs_root_dir.into_channel().unwrap().into_zx_channel().into(),
-                ),
-            ],
-        )
-        .unwrap();
-
-        fx_log_info!("Spawned pkgfs process; binary: {}", PKGSVR_PATH);
-
-        Self { _system_image_merkle: system_image_merkle, _process: process, proxy }
-    }
-
-    pub fn proxy(&self) -> fio::DirectoryProxy {
-        let (proxy, server_end) = create_proxy::<fio::DirectoryMarker>().unwrap();
-        let server_end = server_end.into_channel().into();
-        self.proxy.clone(fio::OpenFlags::CLONE_SAME_RIGHTS, server_end).unwrap();
-        proxy
-    }
-}
diff --git a/src/security/pkg_test/fshost/src/storage.rs b/src/security/pkg_test/fshost/src/storage.rs
index 7838db3..3d54b3d 100644
--- a/src/security/pkg_test/fshost/src/storage.rs
+++ b/src/security/pkg_test/fshost/src/storage.rs
@@ -92,7 +92,9 @@
         if let Some(blobfs_dir) = &self.blobfs_dir {
             return open_directory_in_namespace(
                 blobfs_dir,
-                fio::OpenFlags::RIGHT_WRITABLE | fio::OpenFlags::RIGHT_READABLE,
+                fio::OpenFlags::RIGHT_READABLE
+                    | fio::OpenFlags::RIGHT_WRITABLE
+                    | fio::OpenFlags::RIGHT_EXECUTABLE,
             )
             .unwrap();
         }
diff --git a/src/security/pkg_test/tests/access_ota_blob_as_executable/meta/access_ota_blob_as_executable.cml b/src/security/pkg_test/tests/access_ota_blob_as_executable/meta/access_ota_blob_as_executable.cml
index c6c92a3..a874537 100644
--- a/src/security/pkg_test/tests/access_ota_blob_as_executable/meta/access_ota_blob_as_executable.cml
+++ b/src/security/pkg_test/tests/access_ota_blob_as_executable/meta/access_ota_blob_as_executable.cml
@@ -105,7 +105,7 @@
         // Attempt to access packages via the `pkgfs` APIs.
         {
             directory: "pkgfs",
-            from: "#fshost",
+            from: "#pkg-cache",
             rights: [ "rx*" ],
             path: "/pkgfs",
         },
@@ -136,8 +136,9 @@
     ],
     offer: [
         {
-            directory: "blob",
+            directory: "blob-exec",
             from: "parent",
+            as: "blob",
             to: "#pkg_server",
         },
         {
@@ -146,10 +147,7 @@
             to: "#pkg_server",
         },
         {
-            directory: [
-                "blob",
-                "pkgfs",
-            ],
+            directory: "blob-exec",
             from: "#fshost",
             to: "#pkg-cache",
         },
@@ -165,38 +163,30 @@
             ],
         },
         {
-            directory: "pkgfs",
-            from: "#fshost",
+            directory: "system",
+            from: "#pkg-cache",
             as: "pkgfs-system",
-            to: [
-                "#pkg-resolver",
-                "#system-updater",
-            ],
-            subdir: "system",
+            to: "#system-updater",
         },
         {
-            directory: "pkgfs",
-            from: "#fshost",
-            as: "config-data",
+            directory: "config-data",
+            from: "#pkg-cache",
             to: "#pkg-resolver",
-            subdir: "packages/config-data/0/meta/data/pkg-resolver",
+            subdir: "pkg-resolver",
         },
         {
-            directory: "pkgfs",
-            from: "#fshost",
-            as: "config-data",
+            directory: "config-data",
+            from: "#pkg-cache",
             to: "#system-update-committer",
-            subdir: "packages/config-data/0/meta/data/system-update-committer",
+            subdir: "system-update-committer",
         },
         {
-            directory: "pkgfs",
-            from: "#fshost",
-            as: "root-ssl-certificates",
+            directory: "root-ssl-certificates",
+            from: "#pkg-cache",
             to: [
                 "#pkg-resolver",
                 "#pkg_server",
             ],
-            subdir: "packages/root_ssl_certificates/0/data",
         },
         {
             storage: "data",
@@ -280,11 +270,9 @@
             ],
         },
         {
-            directory: "pkgfs",
-            from: "#fshost",
-            as: "build-info",
+            directory: "build-info",
+            from: "#pkg-cache",
             to: "#system-updater",
-            subdir: "packages/build-info/0/data",
         },
         {
             protocol: "fuchsia.cobalt.LoggerFactory",
@@ -299,6 +287,12 @@
             protocol: "fuchsia.update.CommitStatusProvider",
             from: "#system-update-committer",
             to: "#pkg-cache",
+
+            // system-update-committer depends on pkg-cache for config-data, which forms a cycle.
+            // pkg-cache does not need fuchsia.update.CommitStatusProvider to provide config-data,
+            // so this edge is marked weak to break the cycle. This mirrors the production
+            // configuration in bootstrap.cml
+            dependency: "weak",
         },
         {
             protocol: [ "fuchsia.hardware.power.statecontrol.Admin" ],
diff --git a/src/security/pkg_test/tests/access_ota_blob_as_executable/src/main.rs b/src/security/pkg_test/tests/access_ota_blob_as_executable/src/main.rs
index 0bf0cda..640ea42 100644
--- a/src/security/pkg_test/tests/access_ota_blob_as_executable/src/main.rs
+++ b/src/security/pkg_test/tests/access_ota_blob_as_executable/src/main.rs
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 
 use {
-    anyhow::Result,
+    anyhow::{Context as _, Result},
     argh::{from_env, FromArgs},
     fidl::endpoints::{create_endpoints, create_proxy, ServerEnd},
     fidl_fuchsia_io as fio,
@@ -81,7 +81,6 @@
     }
 
     /// Signals whether both readable and executable results are errors.
-    #[allow(unused)]
     pub fn is_readable_executable_err(&self) -> bool {
         self.readable.is_err() && self.executable.is_err()
     }
@@ -217,15 +216,28 @@
             let pkg_cache_proxy = connect_to_protocol::<PackageCacheMarker>().unwrap();
             let (package_directory_proxy, package_directory_server_end) =
                 create_proxy::<fio::DirectoryMarker>().unwrap();
-            pkg_cache_proxy
+            // The updated version of the package resolved during OTA is not added to the dynamic
+            // index (because the system-updater adds it to the retained index before resolving it),
+            // so attempting to PackageCache.Open it should fail with NOT_FOUND. We convert
+            // NOT_FOUND into not readable and not executable so we can verify that the OTA
+            // process does not create more executable packages. Non NOT_FOUND errors fail the test
+            // to help catch misconfigurations of the test environment.
+            match pkg_cache_proxy
                 .open(&mut package_blob_id.clone(), package_directory_server_end)
                 .await
                 .unwrap()
-                .unwrap();
-            Some((
-                self.attempt_readable(&package_directory_proxy).await,
-                self.attempt_executable(&package_directory_proxy).await,
-            ))
+            {
+                Ok(()) => Some((
+                    self.attempt_readable(&package_directory_proxy).await,
+                    self.attempt_executable(&package_directory_proxy).await,
+                )),
+                Err(e) if Status::from_raw(e) == Status::NOT_FOUND => Some((
+                    Status::ok(e).context("PackageCache.Open failed"),
+                    Result::<Box<fidl_fuchsia_mem::Buffer>, _>::Err(Status::from_raw(e))
+                        .context("PackageCache.Open failed"),
+                )),
+                Err(e) => panic!("unexpected PackageCache.Open error {:?}", e),
+            }
         } else {
             fx_log_info!(
                 "Skipping open package via fuchsia.pkg/PackageCache.Open: {}",
@@ -647,11 +659,19 @@
         hello_world_v1_access_check.perform_access_check().await;
 
     // Post-update new version access check: Access should fail on all
-    // hash-qualified attempts to open as executable.
-    assert!(hello_world_v1_access_check_result.pkgfs_versions.unwrap().is_executable_err());
-    assert!(hello_world_v1_access_check_result.pkg_cache_open.unwrap().is_executable_err());
+    // hash-qualified attempts to open as executable. Additionally, the system-updater adds
+    // the OTA packages to the retained index before resolving them, preventing the packages from
+    // appearing in the dynamic index, so we should not be able to obtain readable packages from
+    // the pkgfs directories or PackageCache.Open.
+    assert!(hello_world_v1_access_check_result
+        .pkgfs_versions
+        .unwrap()
+        .is_readable_executable_err());
+    assert!(hello_world_v1_access_check_result
+        .pkg_cache_open
+        .unwrap()
+        .is_readable_executable_err());
     assert!(hello_world_v1_access_check_result.pkg_cache_get.unwrap().is_executable_err());
-
     assert!(hello_world_v1_access_check_result.pkg_resolver_with_hash.unwrap().is_executable_err());
 
     let hello_world_v0_access_check_result =
diff --git a/src/security/pkg_test/tests/meta/fshost_v0.cml b/src/security/pkg_test/tests/meta/fshost_v0.cml
index 1e13a20..06b01ca 100644
--- a/src/security/pkg_test/tests/meta/fshost_v0.cml
+++ b/src/security/pkg_test/tests/meta/fshost_v0.cml
@@ -7,8 +7,6 @@
         args: [
             "--fvm-block-file-path",
             "/pkg/data/assemblies/hello_world_v0/fvm.blk",
-            "--system-image-path",
-            "/pkg/data/assemblies/hello_world_v0/base.far",
         ],
     },
 }
diff --git a/src/security/policy/component_manager_policy.json5 b/src/security/policy/component_manager_policy.json5
index 6dd17bd..a9c7db2 100644
--- a/src/security/policy/component_manager_policy.json5
+++ b/src/security/policy/component_manager_policy.json5
@@ -224,18 +224,6 @@
             {
                 source_moniker: "/bootstrap/fshost",
                 source: "component",
-                source_name: "bin",
-                capability: "directory",
-                target_monikers: [
-                    "/bootstrap",
-                    "/bootstrap/console-launcher",
-                    "/bootstrap/fshost",
-                    "/core/sshd-host",
-                ],
-            },
-            {
-                source_moniker: "/bootstrap/fshost",
-                source: "component",
                 source_name: "blob",
                 capability: "directory",
                 target_monikers: [
@@ -250,22 +238,6 @@
             {
                 source_moniker: "/bootstrap/fshost",
                 source: "component",
-                source_name: "pkgfs",
-                capability: "directory",
-                target_monikers: [
-                    "/bootstrap",
-                    "/bootstrap/console-launcher",
-                    "/bootstrap/driver_manager",
-                    "/bootstrap/fshost",
-                    "/core",
-                    "/core/appmgr",
-                    "/core/pkg-cache",
-                    "/core/sshd-host",
-                ],
-            },
-            {
-                source_moniker: "/bootstrap/fshost",
-                source: "component",
                 source_name: "minfs",
                 capability: "directory",
                 target_monikers: [
@@ -285,23 +257,6 @@
             {
                 source_moniker: "/bootstrap/fshost",
                 source: "component",
-                source_name: "system",
-                capability: "directory",
-                target_monikers: [
-                    "/bootstrap",
-                    "/bootstrap/console-launcher",
-                    "/bootstrap/driver_manager",
-                    "/bootstrap/fshost",
-                    "/core",
-                    "/core/appmgr",
-                    "/core/sshd-host",
-                    "/core/system-updater",
-                    "/core/system-update-checker",
-                ],
-            },
-            {
-                source_moniker: "/bootstrap/fshost",
-                source: "component",
                 source_name: "tmp",
                 capability: "directory",
                 target_monikers: [
@@ -320,24 +275,6 @@
             {
                 source_moniker: "/bootstrap/fshost",
                 source: "component",
-                source_name: "build-info",
-                capability: "directory",
-                target_monikers: [
-                    "/bootstrap/fshost",
-                    "/core/build-info",
-                    "/core/feedback",
-                    "/core/omaha-client-service",
-                    "/core/sshd-host",
-                    "/core/system-updater",
-
-                    // TODO(fxbug.dev/91934): Once we can define test realms out of tree
-                    // we should remove this.
-                    "/core/test_manager/chromium-tests:**",
-                ],
-            },
-            {
-                source_moniker: "/bootstrap/fshost",
-                source: "component",
                 source_name: "deprecated-misc-storage",
                 capability: "directory",
                 target_monikers: [
@@ -354,6 +291,8 @@
                 target_monikers: [
                     "/bootstrap/fshost",
                     "/bootstrap/fshost/blobfs",
+                    "/bootstrap/pkg-cache",
+                    "/bootstrap/pkg_cache_resolver",
                 ],
             },
             {
@@ -388,7 +327,7 @@
             {
                 // We restrict access to PackageCache because it gives direct access to package
                 // handles which provide executability which bypass VX security policy.
-                source_moniker: "/core/pkg-cache",
+                source_moniker: "/bootstrap/pkg-cache",
                 source: "component",
                 source_name: "fuchsia.pkg.PackageCache",
                 capability: "protocol",
@@ -402,7 +341,7 @@
                 // We restrict access to RetainedPackages because it gives callers the ability
                 // to override certain package garbage collection behavior intended to only be
                 // used by the system updater.
-                source_moniker: "/core/pkg-cache",
+                source_moniker: "/bootstrap/pkg-cache",
                 source: "component",
                 source_name: "fuchsia.pkg.RetainedPackages",
                 capability: "protocol",
@@ -411,6 +350,91 @@
                 ],
             },
             {
+                // We restrict access to PackageCache because it gives direct access to executable
+                // binaries.
+                source_moniker: "/bootstrap/pkg-cache",
+                source: "component",
+                source_name: "bin",
+                capability: "directory",
+                target_monikers: [
+                    "/bootstrap/console-launcher",
+                    "/core/sshd-host",
+                ],
+            },
+            {
+                source_moniker: "/bootstrap/pkg-cache",
+                source: "component",
+                source_name: "build-info",
+                capability: "directory",
+                target_monikers: [
+                    "/core/build-info",
+                    "/core/feedback",
+                    "/core/omaha-client-service",
+                    "/core/sshd-host",
+                    "/core/system-updater",
+
+                    // TODO(fxbug.dev/91934): Once we can define test realms out of tree
+                    // we should remove this.
+                    "/core/test_manager/chromium-tests:**",
+                ],
+            },
+            {
+                // We restrict access to pkgfs because it gives direct access to executable package
+                // handles.
+                source_moniker: "/bootstrap/pkg-cache",
+                source: "component",
+                source_name: "pkgfs",
+                capability: "directory",
+                target_monikers: [
+                    "/bootstrap/console-launcher",
+                    "/bootstrap/driver_manager",
+                    "/core",
+                    "/core/appmgr",
+                    "/core/sshd-host",
+                ],
+            },
+            {
+                // We restrict access to pkgfs-packages because it gives direct access to
+                // executable package handles.
+                source_moniker: "/bootstrap/pkg-cache",
+                source: "component",
+                source_name: "pkgfs-packages",
+                capability: "directory",
+                target_monikers: [
+                    "/bootstrap/base_resolver",
+                    "/bootstrap/driver_index",
+                ],
+            },
+            {
+                // We restrict access to pkgfs-versions because it gives direct access to
+                // executable package handles.
+                source_moniker: "/bootstrap/pkg-cache",
+                source: "component",
+                source_name: "pkgfs-versions",
+                capability: "directory",
+                target_monikers: [
+                    // TODO(fxbug.dev/99692) migrate clients of "pkgfs" to just the subdirectories
+                ],
+            },
+            {
+                // We restrict access to system because it gives direct access to executable
+                // binaries.
+                source_moniker: "/bootstrap/pkg-cache",
+                source: "component",
+                source_name: "system",
+                capability: "directory",
+                target_monikers: [
+                    "/bootstrap",
+                    "/bootstrap/console-launcher",
+                    "/bootstrap/driver_manager",
+                    "/core",
+                    "/core/appmgr",
+                    "/core/sshd-host",
+                    "/core/system-updater",
+                    "/core/system-update-checker",
+                ],
+            },
+            {
                 // We restrict access to base-resolver's ComponentResolver protocol because we
                 // expect only parts of component framework to be able to access it.
                 source_moniker: "/bootstrap/base-resolver",
diff --git a/src/storage/fshost/block-device.cc b/src/storage/fshost/block-device.cc
index bb03311..787e34f 100644
--- a/src/storage/fshost/block-device.cc
+++ b/src/storage/fshost/block-device.cc
@@ -701,7 +701,6 @@
         FX_PLOGS(ERROR, status) << "Failed to mount blobfs partition";
         return status;
       }
-      mounter_->TryMountPkgfs();
       return ZX_OK;
     }
     case fs_management::kDiskFormatFxfs:
diff --git a/src/storage/fshost/filesystem-mounter-test.cc b/src/storage/fshost/filesystem-mounter-test.cc
index c32603d..69c4c96 100644
--- a/src/storage/fshost/filesystem-mounter-test.cc
+++ b/src/storage/fshost/filesystem-mounter-test.cc
@@ -63,15 +63,6 @@
 
 TEST_F(MounterTest, CreateFilesystemMounter) { FilesystemMounter mounter(manager(), &config_); }
 
-TEST_F(MounterTest, PkgfsWillNotMountBeforeBlobAndData) {
-  FilesystemMounter mounter(manager(), &config_);
-
-  ASSERT_FALSE(mounter.BlobMounted());
-  ASSERT_FALSE(mounter.DataMounted());
-  mounter.TryMountPkgfs();
-  EXPECT_FALSE(mounter.PkgfsMounted());
-}
-
 enum class FilesystemType {
   kBlobfs,
   kMinfs,
@@ -180,19 +171,5 @@
   ASSERT_TRUE(mounter.FactoryMounted());
 }
 
-TEST_F(MounterTest, PkgfsMountsWithBlob) {
-  config_ = EmptyConfig();
-  TestMounter mounter(manager(), &config_);
-
-  mounter.ExpectFilesystem(FilesystemType::kBlobfs);
-  ASSERT_OK(mounter.MountBlob(zx::channel(), fuchsia_fs_startup::wire::StartOptions()));
-  ASSERT_TRUE(mounter.BlobMounted());
-
-  ASSERT_FALSE(mounter.DataMounted());
-
-  mounter.TryMountPkgfs();
-  EXPECT_TRUE(mounter.PkgfsMounted());
-}
-
 }  // namespace
 }  // namespace fshost
diff --git a/src/storage/fshost/filesystem-mounter.cc b/src/storage/fshost/filesystem-mounter.cc
index 7ed0223..5b1b63a 100644
--- a/src/storage/fshost/filesystem-mounter.cc
+++ b/src/storage/fshost/filesystem-mounter.cc
@@ -232,25 +232,6 @@
   return ZX_OK;
 }
 
-void FilesystemMounter::TryMountPkgfs() {
-  // Pkgfs waits for Blobfs to mount before initializing. Pkgfs is launched from blobfs, so this is
-  // a hard requirement.
-  //
-  // TODO(fxbug.dev/38621): In the future, this mechanism may be replaced with a feed-forward
-  // design to the mounted filesystems.
-  if (!blob_mounted_ || pkgfs_mounted_) {
-    return;
-  }
-
-  // Historically we don't retry if pkgfs fails to launch, which seems reasonable since the
-  // cause of a launch failure is unlikely to be transient.
-  // TODO(fxbug.dev/58363): fshost should handle failures to mount critical filesystems better.
-  if (zx::status status = LaunchPkgfs(this); status.is_error()) {
-    FX_PLOGS(ERROR, status.status_value()) << "failed to launch pkgfs";
-  }
-  pkgfs_mounted_ = true;
-}
-
 void FilesystemMounter::ReportDataPartitionCorrupted() {
   fshost_.mutable_metrics()->LogDataCorruption();
   fshost_.FlushMetrics();
diff --git a/src/storage/fshost/filesystem-mounter.h b/src/storage/fshost/filesystem-mounter.h
index 76b2176..6f85dcd 100644
--- a/src/storage/fshost/filesystem-mounter.h
+++ b/src/storage/fshost/filesystem-mounter.h
@@ -60,17 +60,11 @@
   zx_status_t MountFactoryFs(zx::channel block_device_client,
                              const fs_management::MountOptions& options);
 
-  // Attempts to mount pkgfs if all preconditions have been met:
-  // - Pkgfs has not previously been mounted
-  // - Blobfs has been mounted
-  void TryMountPkgfs();
-
   std::shared_ptr<FshostBootArgs> boot_args() { return fshost_.boot_args(); }
   void ReportDataPartitionCorrupted();
 
   bool BlobMounted() const { return blob_mounted_; }
   bool DataMounted() const { return data_mounted_; }
-  bool PkgfsMounted() const { return pkgfs_mounted_; }
   bool FactoryMounted() const { return factory_mounted_; }
   bool DurableMounted() const { return durable_mounted_; }
 
@@ -110,7 +104,6 @@
   bool data_mounted_ = false;
   bool durable_mounted_ = false;
   bool blob_mounted_ = false;
-  bool pkgfs_mounted_ = false;
   bool factory_mounted_ = false;
   fidl::ClientEnd<fuchsia_io::Directory> crypt_outgoing_directory_;
 };
diff --git a/src/storage/fshost/meta/core_fshost.cml b/src/storage/fshost/meta/core_fshost.cml
index 0ad9e46..e2c28d0 100644
--- a/src/storage/fshost/meta/core_fshost.cml
+++ b/src/storage/fshost/meta/core_fshost.cml
@@ -9,46 +9,6 @@
             startup: "eager",
         },
     ],
-    capabilities: [
-        {
-            directory: "pkgfs",
-            rights: [
-                "execute",
-                "rw*",
-            ],
-            path: "/fs/pkgfs",
-        },
-        {
-            directory: "pkgfs-packages",
-            rights: [ "rx*" ],
-            path: "/fs/pkgfs/packages",
-        },
-        {
-            directory: "system",
-            rights: [ "rx*" ],
-            path: "/fs/pkgfs/system",
-        },
-        {
-            directory: "bin",
-            rights: [ "rx*" ],
-            path: "/fs/pkgfs/packages/shell-commands/0/bin",
-        },
-        {
-            directory: "config-data",
-            rights: [ "r*" ],
-            path: "/fs/pkgfs/packages/config-data/0/meta/data",
-        },
-        {
-            directory: "root-ssl-certificates",
-            rights: [ "r*" ],
-            path: "/fs/pkgfs/packages/root_ssl_certificates/0/data",
-        },
-        {
-            directory: "build-info",
-            rights: [ "r*" ],
-            path: "/fs/pkgfs/packages/build-info/0/data",
-        },
-    ],
     use: [
         {
             protocol: [
@@ -98,16 +58,8 @@
             as: "blob",
         },
         {
-            directory: [
-                "bin",
-                "build-info",
-                "config-data",
-                "pkgfs",
-                "pkgfs-packages",
-                "root-ssl-certificates",
-                "system",
-            ],
-            from: "self",
+            directory: "blob-exec",
+            from: "#blobfs",
         },
         {
             protocol: "fuchsia.update.verify.BlobfsVerifier",
diff --git a/src/sys/appmgr/meta/appmgr.core_shard.cml b/src/sys/appmgr/meta/appmgr.core_shard.cml
index 2dfff68..fdb77fa 100644
--- a/src/sys/appmgr/meta/appmgr.core_shard.cml
+++ b/src/sys/appmgr/meta/appmgr.core_shard.cml
@@ -235,7 +235,7 @@
                 "fuchsia.pkg.PackageCache",
                 "fuchsia.space.Manager",
             ],
-            from: "#pkg-cache",
+            from: "parent",
         },
         {
             protocol: [
diff --git a/src/sys/base-resolver/BUILD.gn b/src/sys/base-resolver/BUILD.gn
index 51dec56..9f11d71 100644
--- a/src/sys/base-resolver/BUILD.gn
+++ b/src/sys/base-resolver/BUILD.gn
@@ -69,7 +69,10 @@
 }
 
 component_manifest_resources("root-manifest") {
-  sources = [ "meta/base-resolver.cml" ]
+  sources = [
+    "meta/base-resolver.cml",
+    "meta/pkg-cache-resolver.cml",
+  ]
 }
 
 fuchsia_component("base-resolver-component") {
diff --git a/src/sys/bootstrap/meta/bootstrap.cml b/src/sys/bootstrap/meta/bootstrap.cml
index 7e56275..a6d32ac 100644
--- a/src/sys/bootstrap/meta/bootstrap.cml
+++ b/src/sys/bootstrap/meta/bootstrap.cml
@@ -99,6 +99,10 @@
             url: "fuchsia-boot:///#meta/base-resolver.cm",
         },
         {
+            name: "pkg_cache_resolver",
+            url: "fuchsia-boot:///#meta/pkg-cache-resolver.cm",
+        },
+        {
             name: "power_manager",
             url: "fuchsia-boot:///#meta/power-manager.cm",
         },
@@ -114,6 +118,11 @@
             url: "fuchsia-pkg://fuchsia.com/universe-resolver#meta/universe-resolver.cm",
             environment: "#base-resolver-env",
         },
+        {
+            name: "pkg-cache",
+            url: "fuchsia-pkg-cache:///#meta/pkg-cache.cm",
+            environment: "#pkg-cache-env",
+        },
     ],
     collections: [
         {
@@ -552,11 +561,18 @@
         {
             directory: [
                 "bin",
+                "pkgfs",
+                "system",
+            ],
+            from: "#pkg-cache",
+            to: "#console-launcher",
+            dependency: "weak_for_migration",
+        },
+        {
+            directory: [
                 "blob",
                 "minfs",
                 "mnt",
-                "pkgfs",
-                "system",
             ],
             from: "#fshost",
             to: [ "#console-launcher" ],
@@ -564,7 +580,7 @@
         },
         {
             directory: "system",
-            from: "#fshost",
+            from: "#pkg-cache",
             to: "#driver_manager",
             dependency: "weak_for_migration",
         },
@@ -739,6 +755,7 @@
                 "#live_usb",
                 "#netsvc",
                 "#pkg-drivers",
+                "#pkg_cache_resolver",
                 "#power_manager",
                 "#pwrbtn-monitor",
                 "#svchost",
@@ -811,7 +828,7 @@
         },
         {
             directory: "pkgfs",
-            from: "#fshost",
+            from: "#pkg-cache",
             to: "#driver_manager",
             dependency: "weak_for_migration",
         },
@@ -881,12 +898,45 @@
         // -- End Archivist offers --
         {
             directory: "pkgfs-packages",
-            from: "#fshost",
+            from: "#pkg-cache",
             to: "#base_resolver",
         },
         {
-            directory: "pkgfs-packages",
+            protocol: "fuchsia.boot.Arguments",
+            from: "parent",
+            to: "#pkg_cache_resolver",
+        },
+        {
+            directory: "blob-exec",
             from: "#fshost",
+            to: [
+                "#pkg-cache",
+                "#pkg_cache_resolver",
+            ],
+        },
+        {
+            protocol: "fuchsia.boot.Arguments",
+            from: "parent",
+            to: "#pkg-cache",
+        },
+        {
+            protocol: "fuchsia.logger.LogSink",
+            from: "#archivist",
+            to: "#pkg-cache",
+        },
+        {
+            protocol: [
+                "fuchsia.cobalt.LoggerFactory",
+                "fuchsia.tracing.provider.Registry",
+                "fuchsia.update.CommitStatusProvider",
+            ],
+            from: "parent",
+            to: "#pkg-cache",
+            dependency: "weak",
+        },
+        {
+            directory: "pkgfs-packages",
+            from: "#pkg-cache",
             to: "#driver_index",
             dependency: "weak_for_migration",
         },
@@ -910,22 +960,27 @@
     ],
     expose: [
         {
+            directory: [
+                "bin",
+                "build-info",
+                "config-data",
+                "pkgfs",
+                "root-ssl-certificates",
+                "system",
+            ],
+            from: "#pkg-cache",
+        },
+        {
             directory: "dev",
             from: "#driver_manager",
         },
         {
             directory: [
-                "bin",
                 "blob",
-                "build-info",
-                "config-data",
                 "durable",
                 "factory",
                 "minfs",
                 "mnt",
-                "pkgfs",
-                "root-ssl-certificates",
-                "system",
                 "tmp",
             ],
             from: "#fshost",
@@ -1038,6 +1093,14 @@
             protocol: "fuchsia.nand.flashmap.Manager",
             from: "#flashmap",
         },
+        {
+            protocol: [
+                "fuchsia.pkg.PackageCache",
+                "fuchsia.pkg.RetainedPackages",
+                "fuchsia.space.Manager",
+            ],
+            from: "#pkg-cache",
+        },
     ],
     environments: [
         {
@@ -1132,5 +1195,16 @@
             // 1 second
             __stop_timeout_ms: 1000,
         },
+        {
+            name: "pkg-cache-env",
+            extends: "realm",
+            resolvers: [
+                {
+                    resolver: "pkg_cache_resolver",
+                    from: "#pkg_cache_resolver",
+                    scheme: "fuchsia-pkg-cache",
+                },
+            ],
+        },
     ],
 }
diff --git a/src/sys/core/build/core.gni b/src/sys/core/build/core.gni
index 6a0e2a7..c1b2517 100644
--- a/src/sys/core/build/core.gni
+++ b/src/sys/core/build/core.gni
@@ -72,7 +72,7 @@
   generated_file(generated_file_target) {
     deps = shard_deps
     data_keys = [ "shard_files" ]
-    outputs = [ "$target_out_dir/core_shard_list" ]
+    outputs = [ "$target_out_dir/${target_name}_core_shard_list" ]
   }
   core_shard_list = get_target_outputs(":$generated_file_target")
 
diff --git a/src/sys/core/meta/core.cml b/src/sys/core/meta/core.cml
index d2ed5c6..3eef8f8 100644
--- a/src/sys/core/meta/core.cml
+++ b/src/sys/core/meta/core.cml
@@ -154,10 +154,6 @@
             environment: "#full-resolver-env",
         },
         {
-            name: "pkg-cache",
-            url: "fuchsia-pkg://fuchsia.com/pkg-cache#meta/pkg-cache.cm",
-        },
-        {
             name: "pkg-resolver",
             url: "fuchsia-pkg://fuchsia.com/pkg-resolver#meta/pkg-resolver.cm",
         },
@@ -351,32 +347,11 @@
             to: "#universe-resolver",
         },
         {
-            directory: [
-                "blob",
-                "pkgfs",
+            protocol: [
+                "fuchsia.boot.Arguments",
+                "fuchsia.pkg.PackageCache",
             ],
             from: "parent",
-            to: "#pkg-cache",
-        },
-        {
-            protocol: [ "fuchsia.boot.Arguments" ],
-            from: "parent",
-            to: "#pkg-cache",
-        },
-        {
-            protocol: [ "fuchsia.tracing.provider.Registry" ],
-            from: "self",
-            to: "#pkg-cache",
-            dependency: "weak_for_migration",
-        },
-        {
-            protocol: "fuchsia.update.CommitStatusProvider",
-            from: "#system-update-committer",
-            to: "#pkg-cache",
-        },
-        {
-            protocol: [ "fuchsia.boot.Arguments" ],
-            from: "parent",
             to: "#pkg-resolver",
         },
         {
@@ -386,11 +361,6 @@
             dependency: "weak_for_migration",
         },
         {
-            protocol: [ "fuchsia.pkg.PackageCache" ],
-            from: "#pkg-cache",
-            to: "#pkg-resolver",
-        },
-        {
             storage: "data",
             from: "self",
             to: "#pkg-resolver",
@@ -464,7 +434,6 @@
                 "#metrics-logger",
                 "#network",
                 "#overnetstack",
-                "#pkg-cache",
                 "#pkg-resolver",
                 "#regulatory_region",
                 "#remote-control",
@@ -984,7 +953,6 @@
             protocol: "fuchsia.cobalt.LoggerFactory",
             from: "#cobalt",
             to: [
-                "#pkg-cache",
                 "#pkg-resolver",
                 "#sampler",
 
@@ -1144,6 +1112,10 @@
             protocol: [ "fuchsia.exception.Handler" ],
             from: "#exceptions",
         },
+        {
+            protocol: "fuchsia.update.CommitStatusProvider",
+            from: "#system-update-committer",
+        },
     ],
     environments: [
         {
diff --git a/src/sys/pkg/bin/pkg-cache/meta/pkg-cache.cml b/src/sys/pkg/bin/pkg-cache/meta/pkg-cache.cml
index 750b4fc..1db0747 100644
--- a/src/sys/pkg/bin/pkg-cache/meta/pkg-cache.cml
+++ b/src/sys/pkg/bin/pkg-cache/meta/pkg-cache.cml
@@ -58,14 +58,11 @@
     ],
     use: [
         {
-            // TODO(fxbug.dev/68239): Add the "execute" right once possible.
-            directory: "pkgfs",
-            rights: [ "rw*" ],
-            path: "/pkgfs",
-        },
-        {
-            directory: "blob",
-            rights: [ "rw*" ],
+            directory: "blob-exec",
+            rights: [
+                "execute",
+                "rw*",
+            ],
             path: "/blob",
         },
         {
diff --git a/src/sys/pkg/bin/pkg-cache/src/cache_service.rs b/src/sys/pkg/bin/pkg-cache/src/cache_service.rs
index 8c6dec3..1869412 100644
--- a/src/sys/pkg/bin/pkg-cache/src/cache_service.rs
+++ b/src/sys/pkg/bin/pkg-cache/src/cache_service.rs
@@ -37,13 +37,13 @@
 };
 
 pub async fn serve(
-    pkgfs_versions: pkgfs::versions::Client,
-    pkgfs_install: pkgfs::install::Client,
-    pkgfs_needs: pkgfs::needs::Client,
     package_index: Arc<async_lock::RwLock<PackageIndex>>,
     blobfs: blobfs::Client,
     base_packages: Arc<BasePackages>,
     cache_packages: Arc<Option<system_image::CachePackages>>,
+    executability_restrictions: system_image::ExecutabilityRestrictions,
+    non_static_allow_list: Arc<system_image::NonStaticAllowList>,
+    scope: package_directory::ExecutionScope,
     stream: PackageCacheRequestStream,
     cobalt_sender: CobaltSender,
     serve_id: Arc<AtomicU32>,
@@ -62,14 +62,14 @@
                         "meta_far_blob_id" => meta_far_blob.blob_id.to_string().as_str()
                     );
                     let response = get(
-                        &pkgfs_versions,
-                        &pkgfs_install,
-                        &pkgfs_needs,
-                        &package_index,
+                        package_index.as_ref(),
+                        base_packages.as_ref(),
+                        executability_restrictions,
+                        non_static_allow_list.as_ref(),
                         &blobfs,
                         meta_far_blob,
                         needed_blobs,
-                        dir,
+                        dir.map(|dir| (dir, scope.clone())),
                         cobalt_sender,
                         &node,
                     )
@@ -81,12 +81,22 @@
                     responder.send(&mut response.map_err(|status| status.into_raw()))?;
                 }
                 PackageCacheRequest::Open { meta_far_blob_id, dir, responder } => {
-                    let meta_far_blob_id: BlobId = meta_far_blob_id.into();
+                    let meta_far: Hash = BlobId::from(meta_far_blob_id).into();
                     trace::duration_begin!("app", "cache_open",
-                        "meta_far_blob_id" => meta_far_blob_id.to_string().as_str()
+                        "meta_far_blob_id" => meta_far.to_string().as_str()
                     );
-                    let response =
-                        open(&pkgfs_versions, meta_far_blob_id, dir, cobalt_sender).await;
+                    let response = open(
+                        package_index.as_ref(),
+                        base_packages.as_ref(),
+                        executability_restrictions,
+                        non_static_allow_list.as_ref(),
+                        scope.clone(),
+                        &blobfs,
+                        meta_far,
+                        dir,
+                        cobalt_sender,
+                    )
+                    .await;
                     trace::duration_end!("app", "cache_open",
                         "status" => Status::from(response).to_string().as_str()
                     );
@@ -94,7 +104,7 @@
                 }
                 PackageCacheRequest::BasePackageIndex { iterator, control_handle: _ } => {
                     let stream = iterator.into_stream()?;
-                    serve_base_package_index(base_packages.clone(), stream).await;
+                    serve_base_package_index(Arc::clone(&base_packages), stream).await;
                 }
                 PackageCacheRequest::CachePackageIndex { iterator, control_handle: _ } => {
                     let stream = iterator.into_stream()?;
@@ -159,16 +169,25 @@
     }
 }
 
+fn make_pkgdir_flags(executability_status: ExecutabilityStatus) -> fio::OpenFlags {
+    use ExecutabilityStatus::*;
+    fio::OpenFlags::RIGHT_READABLE
+        | match executability_status {
+            Allowed => fio::OpenFlags::RIGHT_EXECUTABLE,
+            Forbidden => fio::OpenFlags::empty(),
+        }
+}
+
 /// Fetch a package, and optionally open it.
 async fn get<'a>(
-    pkgfs_versions: &'a pkgfs::versions::Client,
-    pkgfs_install: &'a pkgfs::install::Client,
-    pkgfs_needs: &'a pkgfs::needs::Client,
-    package_index: &Arc<async_lock::RwLock<PackageIndex>>,
+    package_index: &async_lock::RwLock<PackageIndex>,
+    base_packages: &BasePackages,
+    executability_restrictions: system_image::ExecutabilityRestrictions,
+    non_static_allow_list: &system_image::NonStaticAllowList,
     blobfs: &blobfs::Client,
     meta_far_blob: BlobInfo,
     needed_blobs: ServerEnd<NeededBlobsMarker>,
-    dir_request: Option<ServerEnd<fio::DirectoryMarker>>,
+    dir_and_scope: Option<(ServerEnd<fio::DirectoryMarker>, package_directory::ExecutionScope)>,
     mut cobalt_sender: CobaltSender,
     node: &finspect::Node,
 ) -> Result<(), Status> {
@@ -178,56 +197,54 @@
 
     let needed_blobs = needed_blobs.into_stream().map_err(|_| Status::INTERNAL)?;
 
-    let pkg = if let Ok(pkg) = pkgfs_versions.open_package(&meta_far_blob.blob_id.into()).await {
-        // If the package can already be opened, it is already cached.
-        needed_blobs.control_handle().shutdown_with_epitaph(Status::OK);
-
-        pkg
-    } else {
-        // Otherwise, go through the process to cache it.
-        fx_log_info!("fetching {}", meta_far_blob.blob_id);
-
-        let () = serve_needed_blobs(
-            needed_blobs,
-            meta_far_blob,
-            pkgfs_install,
-            pkgfs_needs,
-            package_index,
-            blobfs,
-            node,
-        )
-        .await
-        .map_err(|e| {
-            match &e {
-                ServeNeededBlobsError::Activate(PokePkgfsError::UnexpectedNeeds(_)) => {
-                    cobalt_sender.log_event_count(
-                        metrics::PKG_CACHE_UNEXPECTED_PKGFS_NEEDS_METRIC_ID,
-                        (),
-                        0,
-                        1,
-                    );
-                }
-                _ => {}
+    let package_status =
+        match get_package_status(base_packages, package_index, &meta_far_blob.blob_id.into()).await
+        {
+            ps @ PackageStatus::Base | ps @ PackageStatus::Active(_) => {
+                let () = needed_blobs.control_handle().shutdown_with_epitaph(Status::OK);
+                ps
             }
+            PackageStatus::Other => {
+                fx_log_info!("get package {}", meta_far_blob.blob_id);
+                let name =
+                    serve_needed_blobs(needed_blobs, meta_far_blob, package_index, blobfs, node)
+                        .await
+                        .map_err(|e| {
+                            fx_log_err!(
+                                "error while caching package {}: {:#}",
+                                meta_far_blob.blob_id,
+                                anyhow!(e)
+                            );
+                            cobalt_sender.log_event_count(
+                                metrics::PKG_CACHE_OPEN_METRIC_ID,
+                                metrics::PkgCacheOpenMetricDimensionResult::Io,
+                                0,
+                                1,
+                            );
+                            Status::UNAVAILABLE
+                        })?;
+                if let Some(name) = name {
+                    PackageStatus::Active(name)
+                } else {
+                    PackageStatus::Other
+                }
+            }
+        };
 
-            fx_log_err!("error while caching package {}: {:#}", meta_far_blob.blob_id, anyhow!(e));
-
-            cobalt_sender.log_event_count(
-                metrics::PKG_CACHE_OPEN_METRIC_ID,
-                metrics::PkgCacheOpenMetricDimensionResult::Io,
-                0,
-                1,
-            );
-
-            Status::UNAVAILABLE
-        })?;
-
-        pkgfs_versions.open_package(&meta_far_blob.blob_id.into()).await.map_err(|err| {
-            fx_log_err!(
-                "error opening package after fetching it {}: {:#}",
-                meta_far_blob.blob_id,
-                anyhow!(err)
-            );
+    if let Some((dir, scope)) = dir_and_scope {
+        let () = package_directory::serve(
+            scope,
+            blobfs.clone(),
+            meta_far_blob.blob_id.into(),
+            make_pkgdir_flags(executability_status(
+                executability_restrictions,
+                &package_status,
+                non_static_allow_list,
+            )),
+            dir,
+        )
+        .map_err(|e| {
+            fx_log_err!("get: error serving package {}: {:#}", meta_far_blob.blob_id, anyhow!(e));
             cobalt_sender.log_event_count(
                 metrics::PKG_CACHE_OPEN_METRIC_ID,
                 metrics::PkgCacheOpenMetricDimensionResult::Io,
@@ -235,20 +252,8 @@
                 1,
             );
             Status::INTERNAL
-        })?
-    };
-
-    if let Some(dir_request) = dir_request {
-        pkg.reopen(dir_request).map_err(|err| {
-            fx_log_err!("error reopening {}: {:#}", meta_far_blob.blob_id, anyhow!(err));
-            cobalt_sender.log_event_count(
-                metrics::PKG_CACHE_OPEN_METRIC_ID,
-                metrics::PkgCacheOpenMetricDimensionResult::Io,
-                0,
-                1,
-            );
-            Status::INTERNAL
-        })?;
+        })
+        .await?;
     }
 
     cobalt_sender.log_event_count(
@@ -262,36 +267,42 @@
 
 /// Open a package directory.
 async fn open<'a>(
-    pkgfs_versions: &'a pkgfs::versions::Client,
-    meta_far_blob_id: BlobId,
+    package_index: &async_lock::RwLock<PackageIndex>,
+    base_packages: &BasePackages,
+    executability_restrictions: system_image::ExecutabilityRestrictions,
+    non_static_allow_list: &system_image::NonStaticAllowList,
+    scope: package_directory::ExecutionScope,
+    blobfs: &blobfs::Client,
+    meta_far: Hash,
     dir_request: ServerEnd<fio::DirectoryMarker>,
     mut cobalt_sender: CobaltSender,
 ) -> Result<(), Status> {
-    let pkg =
-        pkgfs_versions.open_package(&meta_far_blob_id.into()).await.map_err(|err| match err {
-            pkgfs::versions::OpenError::NotFound => {
-                cobalt_sender.log_event_count(
-                    metrics::PKG_CACHE_OPEN_METRIC_ID,
-                    metrics::PkgCacheOpenMetricDimensionResult::NotFound,
-                    0,
-                    1,
-                );
-                Status::NOT_FOUND
-            }
-            err => {
-                cobalt_sender.log_event_count(
-                    metrics::PKG_CACHE_OPEN_METRIC_ID,
-                    metrics::PkgCacheOpenMetricDimensionResult::Io,
-                    0,
-                    1,
-                );
-                fx_log_err!("error opening {}: {:?}", meta_far_blob_id, err);
-                Status::INTERNAL
-            }
-        })?;
+    let package_status = match get_package_status(base_packages, package_index, &meta_far).await {
+        PackageStatus::Other => {
+            cobalt_sender.log_event_count(
+                metrics::PKG_CACHE_OPEN_METRIC_ID,
+                metrics::PkgCacheOpenMetricDimensionResult::NotFound,
+                0,
+                1,
+            );
+            return Err(Status::NOT_FOUND);
+        }
+        ps @ PackageStatus::Base | ps @ PackageStatus::Active(_) => ps,
+    };
 
-    pkg.reopen(dir_request).map_err(|err| {
-        fx_log_err!("error opening {}: {:#}", meta_far_blob_id, anyhow!(err));
+    let () = package_directory::serve(
+        scope,
+        blobfs.clone(),
+        meta_far,
+        make_pkgdir_flags(executability_status(
+            executability_restrictions,
+            &package_status,
+            non_static_allow_list,
+        )),
+        dir_request,
+    )
+    .map_err(|e| {
+        fx_log_err!("open: error serving package {}: {:#}", meta_far, anyhow!(e));
         cobalt_sender.log_event_count(
             metrics::PKG_CACHE_OPEN_METRIC_ID,
             metrics::PkgCacheOpenMetricDimensionResult::Io,
@@ -299,7 +310,8 @@
             1,
         );
         Status::INTERNAL
-    })?;
+    })
+    .await?;
 
     cobalt_sender.log_event_count(
         metrics::PKG_CACHE_OPEN_METRIC_ID,
@@ -349,9 +361,6 @@
 
     #[error("while updating package index with meta far info")]
     FulfillMetaFar(#[from] FulfillMetaFarError),
-
-    #[error("while activating the package in pkgfs")]
-    Activate(#[from] PokePkgfsError),
 }
 
 #[derive(Debug)]
@@ -386,15 +395,15 @@
 ///
 /// Once all needed blobs are written by the client, the package cache will complete the pending
 /// [`PackageCache.Get`] request and close this channel with a `ZX_OK` epitaph.
+///
+/// Returns the package's name if the package was activated in the dynamic index.
 async fn serve_needed_blobs(
     mut stream: NeededBlobsRequestStream,
     meta_far_info: BlobInfo,
-    pkgfs_install: &pkgfs::install::Client,
-    pkgfs_needs: &pkgfs::needs::Client,
-    package_index: &Arc<async_lock::RwLock<PackageIndex>>,
+    package_index: &async_lock::RwLock<PackageIndex>,
     blobfs: &blobfs::Client,
     node: &finspect::Node,
-) -> Result<(), ServeNeededBlobsError> {
+) -> Result<Option<fuchsia_pkg::PackageName>, ServeNeededBlobsError> {
     let state = node.create_string("state", "need-meta-far");
     let res = async {
         // Step 1: Open and write the meta.far, or determine it is not needed.
@@ -411,20 +420,18 @@
         // Step 3: Open and write all needed data blobs.
         let () = handle_open_blobs(&mut stream, needs, blobfs, &node).await?;
 
-        // Step 4: Start an install for this package through pkgfs, expecting it to discover no
-        // work is needed and start serving the package's pkg dir at /pkgfs/versions/<merkle>.
-        let () = poke_pkgfs(pkgfs_install, pkgfs_needs, meta_far_info).await?;
-
         serve_iterator.await;
         Ok(())
     }
     .await;
 
-    if res.is_ok() {
-        package_index.write().await.complete_install(meta_far_info.blob_id.into())?;
-    } else {
-        package_index.write().await.cancel_install(&meta_far_info.blob_id.into());
-    }
+    let res = match res {
+        Ok(()) => Ok(package_index.write().await.complete_install(meta_far_info.blob_id.into())?),
+        Err(e) => {
+            package_index.write().await.cancel_install(&meta_far_info.blob_id.into());
+            Err(e)
+        }
+    };
 
     // TODO in the Err(_) case, a responder was likely dropped, which would have already shutdown
     // the stream without our custom epitaph value.  Need to find a nice way to always shutdown
@@ -443,7 +450,7 @@
     stream: &mut NeededBlobsRequestStream,
     meta_far_info: BlobInfo,
     blobfs: &blobfs::Client,
-    package_index: &Arc<async_lock::RwLock<PackageIndex>>,
+    package_index: &async_lock::RwLock<PackageIndex>,
     state: &StringProperty,
 ) -> Result<HashSet<Hash>, ServeNeededBlobsError> {
     let hash = meta_far_info.blob_id.into();
@@ -621,60 +628,6 @@
     Ok(())
 }
 
-async fn poke_pkgfs(
-    pkgfs_install: &pkgfs::install::Client,
-    pkgfs_needs: &pkgfs::needs::Client,
-    meta_far_info: BlobInfo,
-) -> Result<(), PokePkgfsError> {
-    // Try to create the meta far blob, expecting it to fail with already_exists, indicating the
-    // meta far blob is readable in blobfs. Pkgfs will then import the package, populating
-    // /pkgfs/needs/<merkle> with any missing content blobs, which we expect to be empty.
-    let () = match pkgfs_install.create_blob(meta_far_info.blob_id.into(), BlobKind::Package).await
-    {
-        Err(pkgfs::install::BlobCreateError::AlreadyExists) => Ok(()),
-        Ok((_blob, closer)) => {
-            let () = closer.close().await;
-            Err(PokePkgfsError::MetaFarWritable)
-        }
-        Err(e) => Err(PokePkgfsError::UnexpectedMetaFarCreateError(e)),
-    }?;
-
-    // Verify that /pkgfs/needs/<merkle> is empty or missing, failing with its contents if it is
-    // not.
-    let needs = {
-        let needs = pkgfs_needs.list_needs(meta_far_info.blob_id.into());
-        futures::pin_mut!(needs);
-        match needs.try_next().await.map_err(PokePkgfsError::ListNeeds)? {
-            Some(needs) => {
-                let mut needs = needs.into_iter().collect::<Vec<_>>();
-                needs.sort_unstable();
-                needs
-            }
-            None => vec![],
-        }
-    };
-    if !needs.is_empty() {
-        return Err(PokePkgfsError::UnexpectedNeeds(needs));
-    }
-
-    Ok(())
-}
-
-#[derive(thiserror::Error, Debug)]
-enum PokePkgfsError {
-    #[error("the meta far should be read-only, but it is writable")]
-    MetaFarWritable,
-
-    #[error("the meta far failed to open with an unexpected error")]
-    UnexpectedMetaFarCreateError(#[source] pkgfs::install::BlobCreateError),
-
-    #[error("while listing needs")]
-    ListNeeds(#[source] pkgfs::needs::ListNeedsError),
-
-    #[error("the package should have all blobs present on disk, but some were not ({0:?})")]
-    UnexpectedNeeds(Vec<Hash>),
-}
-
 #[derive(Debug)]
 enum OpenWriteBlobError {
     NonFatalWrite(ServeWriteBlobError),
@@ -1066,8 +1019,6 @@
 
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
 
-        let (pkgfs_install, _) = pkgfs::install::Client::new_test();
-        let (pkgfs_needs, _) = pkgfs::needs::Client::new_test();
         let (blobfs, _) = blobfs::Client::new_test();
         let inspector = finspect::Inspector::new();
         let package_index = Arc::new(async_lock::RwLock::new(PackageIndex::new(
@@ -1078,8 +1029,6 @@
             serve_needed_blobs(
                 stream,
                 meta_blob_info,
-                &pkgfs_install,
-                &pkgfs_needs,
                 &package_index,
                 &blobfs,
                 &inspector.root().create_child("test-node-name")
@@ -1091,18 +1040,10 @@
 
     fn spawn_serve_needed_blobs_with_mocks(
         meta_blob_info: BlobInfo,
-    ) -> (
-        Task<Result<(), ServeNeededBlobsError>>,
-        NeededBlobsProxy,
-        pkgfs::install::Mock,
-        pkgfs::needs::Mock,
-        blobfs::Mock,
-    ) {
+    ) -> (Task<Result<(), ServeNeededBlobsError>>, NeededBlobsProxy, blobfs::Mock) {
         let (proxy, stream) =
             fidl::endpoints::create_proxy_and_stream::<NeededBlobsMarker>().unwrap();
 
-        let (pkgfs_install, pkgfs_install_mock) = pkgfs::install::Client::new_mock();
-        let (pkgfs_needs, pkgfs_needs_mock) = pkgfs::needs::Client::new_mock();
         let (blobfs, blobfs_mock) = blobfs::Client::new_mock();
         let inspector = finspect::Inspector::new();
         let package_index = Arc::new(async_lock::RwLock::new(PackageIndex::new(
@@ -1114,17 +1055,14 @@
                 serve_needed_blobs(
                     stream,
                     meta_blob_info,
-                    &pkgfs_install,
-                    &pkgfs_needs,
                     &package_index,
                     &blobfs,
                     &inspector.root().create_child("test-node-name"),
                 )
                 .await
+                .map(|_| ())
             }),
             proxy,
-            pkgfs_install_mock,
-            pkgfs_needs_mock,
             blobfs_mock,
         )
     }
@@ -1188,8 +1126,7 @@
     async fn expects_open_meta_blob() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
 
-        let (task, proxy, pkgfs_install, pkgfs_needs, blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         let (iter, iter_server_end) =
             fidl::endpoints::create_proxy::<BlobInfoIteratorMarker>().unwrap();
@@ -1206,16 +1143,13 @@
                 expected: "open_meta_blob"
             })
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn expects_open_meta_blob_once() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 4 };
-        let (task, proxy, pkgfs_install, pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         // Open a needed meta FAR blob and write it.
         let ((), ()) = future::join(
@@ -1268,16 +1202,13 @@
                 expected: "get_missing_blobs"
             })
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn handles_present_meta_blob() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, pkgfs_install, pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         // Try to open the meta FAR blob, but report it is no longer needed.
         let ((), ()) = future::join(
@@ -1321,16 +1252,13 @@
                 expected: "get_missing_blobs"
             })
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn allows_retrying_nonfatal_open_meta_blob_errors() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 1 };
-        let (task, proxy, pkgfs_install, pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         // Try to open the meta FAR blob, but report it is already being written concurrently.
         let ((), ()) = future::join(
@@ -1453,8 +1381,6 @@
                 expected: "get_missing_blobs"
             })
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
@@ -1503,21 +1429,6 @@
         .await;
     }
 
-    pub(super) async fn succeed_poke_pkgfs(
-        pkgfs_install: &mut pkgfs::install::Mock,
-        pkgfs_needs: &mut pkgfs::needs::Mock,
-        meta_blob_info: BlobInfo,
-    ) {
-        let meta_hash = meta_blob_info.blob_id.into();
-
-        pkgfs_install
-            .expect_create_blob(meta_hash, BlobKind::Package.into())
-            .await
-            .fail_open_with_already_exists();
-
-        pkgfs_needs.expect_enumerate_needs(meta_hash).await.fail_open_with_not_found().await;
-    }
-
     async fn collect_blob_info_iterator(proxy: BlobInfoIteratorProxy) -> Vec<BlobInfo> {
         let mut res = vec![];
 
@@ -1537,8 +1448,7 @@
     #[fuchsia_async::run_singlethreaded(test)]
     async fn discovers_and_reports_missing_blobs() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, pkgfs_install, pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         let expected = HashRangeFull::default().skip(1).take(2000).collect::<Vec<_>>();
 
@@ -1573,16 +1483,13 @@
             task.await,
             Err(ServeNeededBlobsError::UnexpectedClose("handle_open_blobs"))
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn handles_no_missing_blobs() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, mut pkgfs_install, mut pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         write_meta_blob(&proxy, &mut blobfs, meta_blob_info, vec![]).await;
 
@@ -1592,23 +1499,18 @@
         let missing_blobs = collect_blob_info_iterator(missing_blobs_iter).await;
         assert_eq!(missing_blobs, vec![]);
 
-        succeed_poke_pkgfs(&mut pkgfs_install, &mut pkgfs_needs, meta_blob_info).await;
-
         assert_matches!(task.await, Ok(()));
         assert_matches!(
             proxy.take_event_stream().next().await,
             Some(Err(fidl::Error::ClientChannelClosed { status: Status::OK, .. }))
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn fails_on_invalid_meta_far() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, pkgfs_install, pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         let bogus_far_data = b"this is not a far file";
 
@@ -1649,118 +1551,13 @@
                 _
             )),)
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
-        blobfs.expect_done().await;
-    }
-
-    #[fuchsia_async::run_singlethreaded(test)]
-    async fn fails_on_pkgfs_install_unexpected_create_error() {
-        let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, mut pkgfs_install, pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
-
-        write_meta_blob(&proxy, &mut blobfs, meta_blob_info, vec![]).await;
-
-        let (missing_blobs_iter, missing_blobs_iter_server_end) =
-            fidl::endpoints::create_proxy::<BlobInfoIteratorMarker>().unwrap();
-        assert_matches!(proxy.get_missing_blobs(missing_blobs_iter_server_end), Ok(()));
-        let missing_blobs = collect_blob_info_iterator(missing_blobs_iter).await;
-        assert_eq!(missing_blobs, vec![]);
-
-        // Indicate the meta far is not present, even though we just wrote it.
-        pkgfs_install
-            .expect_create_blob(meta_blob_info.blob_id.into(), BlobKind::Package.into())
-            .await
-            .fail_open_with_concurrent_write();
-
-        drop(proxy);
-        assert_matches!(
-            task.await,
-            Err(ServeNeededBlobsError::Activate(PokePkgfsError::UnexpectedMetaFarCreateError(_)))
-        );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
-        blobfs.expect_done().await;
-    }
-
-    #[fuchsia_async::run_singlethreaded(test)]
-    async fn fails_on_needs_enumeration_error() {
-        let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, mut pkgfs_install, mut pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
-
-        write_meta_blob(&proxy, &mut blobfs, meta_blob_info, vec![]).await;
-
-        let (missing_blobs_iter, missing_blobs_iter_server_end) =
-            fidl::endpoints::create_proxy::<BlobInfoIteratorMarker>().unwrap();
-        assert_matches!(proxy.get_missing_blobs(missing_blobs_iter_server_end), Ok(()));
-        let missing_blobs = collect_blob_info_iterator(missing_blobs_iter).await;
-        assert_eq!(missing_blobs, vec![]);
-
-        // Indicate the meta far is present, but then fail to enumerate needs in an unexpected way.
-        pkgfs_install
-            .expect_create_blob(meta_blob_info.blob_id.into(), BlobKind::Package.into())
-            .await
-            .fail_open_with_already_exists();
-        pkgfs_needs
-            .expect_enumerate_needs(meta_blob_info.blob_id.into())
-            .await
-            .fail_open_with_unexpected_error()
-            .await;
-
-        drop(proxy);
-        assert_matches!(
-            task.await,
-            Err(ServeNeededBlobsError::Activate(PokePkgfsError::ListNeeds(_)))
-        );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
-        blobfs.expect_done().await;
-    }
-
-    #[fuchsia_async::run_singlethreaded(test)]
-    async fn fails_on_any_pkgfs_needs() {
-        let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, mut pkgfs_install, mut pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
-
-        write_meta_blob(&proxy, &mut blobfs, meta_blob_info, vec![]).await;
-
-        let (missing_blobs_iter, missing_blobs_iter_server_end) =
-            fidl::endpoints::create_proxy::<BlobInfoIteratorMarker>().unwrap();
-        assert_matches!(proxy.get_missing_blobs(missing_blobs_iter_server_end), Ok(()));
-        let missing_blobs = collect_blob_info_iterator(missing_blobs_iter).await;
-        assert_eq!(missing_blobs, vec![]);
-
-        // Indicate the meta far is present, but then indicate that some blobs are needed, which
-        // shouldn't be possible.
-        let unexpected_needs = HashRangeFull::default().take(10).collect::<Vec<_>>();
-        pkgfs_install
-            .expect_create_blob(meta_blob_info.blob_id.into(), BlobKind::Package.into())
-            .await
-            .fail_open_with_already_exists();
-        pkgfs_needs
-            .expect_enumerate_needs(meta_blob_info.blob_id.into())
-            .await
-            .enumerate_needs(unexpected_needs.iter().copied().collect())
-            .await;
-
-        drop(proxy);
-        assert_matches!(
-            task.await,
-            Err(ServeNeededBlobsError::Activate(PokePkgfsError::UnexpectedNeeds(needs))) if needs == unexpected_needs
-        );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn dropping_needed_blobs_stops_missing_blob_iterator() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, pkgfs_install, pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         let missing = HashRangeFull::default().take(10).collect::<Vec<_>>();
         write_meta_blob(&proxy, &mut blobfs, meta_blob_info, missing.iter().copied()).await;
@@ -1791,16 +1588,13 @@
             task.await,
             Err(ServeNeededBlobsError::UnexpectedClose("handle_open_blobs"))
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn expects_get_missing_blobs_once() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, pkgfs_install, pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         let missing = HashRangeFull::default().take(10).collect::<Vec<_>>();
         write_meta_blob(&proxy, &mut blobfs, meta_blob_info, missing.iter().copied()).await;
@@ -1835,8 +1629,6 @@
                 expected: "open_blob"
             })
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
@@ -1877,8 +1669,7 @@
     #[fuchsia_async::run_singlethreaded(test)]
     async fn single_need() {
         let meta_blob_info = BlobInfo { blob_id: [1; 32].into(), length: 0 };
-        let (task, proxy, mut pkgfs_install, mut pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         write_meta_blob(&proxy, &mut blobfs, meta_blob_info, vec![[2; 32].into()]).await;
         enumerate_readable_missing_blobs(
@@ -1926,23 +1717,18 @@
         )
         .await;
 
-        succeed_poke_pkgfs(&mut pkgfs_install, &mut pkgfs_needs, meta_blob_info).await;
-
         assert_matches!(task.await, Ok(()));
         assert_matches!(
             proxy.take_event_stream().next().await,
             Some(Err(fidl::Error::ClientChannelClosed { status: Status::OK, .. }))
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn expects_open_blob_per_blob_once() {
         let meta_blob_info = BlobInfo { blob_id: [1; 32].into(), length: 0 };
-        let (task, proxy, pkgfs_install, pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         write_meta_blob(&proxy, &mut blobfs, meta_blob_info, vec![[2; 32].into()]).await;
         enumerate_readable_missing_blobs(
@@ -1978,16 +1764,13 @@
 
         assert_matches!(task.await, Err(ServeNeededBlobsError::BlobNotNeeded(hash)) if hash == [2; 32].into());
         assert_matches!(proxy.take_event_stream().next().await, None);
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn handles_many_content_blobs_that_need_written() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, mut pkgfs_install, mut pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         let content_blobs = || HashRangeFull::default().skip(1).take(100);
 
@@ -2047,23 +1830,18 @@
         )
         .await;
 
-        succeed_poke_pkgfs(&mut pkgfs_install, &mut pkgfs_needs, meta_blob_info).await;
-
         assert_matches!(task.await, Ok(()));
         assert_matches!(
             proxy.take_event_stream().next().await,
             Some(Err(fidl::Error::ClientChannelClosed { status: Status::OK, .. }))
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn handles_many_content_blobs_that_are_already_present() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, mut pkgfs_install, mut pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         let content_blobs = || HashRangeFull::default().skip(1).take(100);
 
@@ -2097,23 +1875,18 @@
         )
         .await;
 
-        succeed_poke_pkgfs(&mut pkgfs_install, &mut pkgfs_needs, meta_blob_info).await;
-
         assert_matches!(task.await, Ok(()));
         assert_matches!(
             proxy.take_event_stream().next().await,
             Some(Err(fidl::Error::ClientChannelClosed { status: Status::OK, .. }))
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn allows_retrying_nonfatal_open_blob_errors() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, mut pkgfs_install, mut pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         let content_blob = Hash::from([1; 32]);
 
@@ -2239,20 +2012,15 @@
         )
         .await;
 
-        succeed_poke_pkgfs(&mut pkgfs_install, &mut pkgfs_needs, meta_blob_info).await;
-
         // That was the only data blob, so the operation is now done.
         assert_matches!(task.await, Ok(()));
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn abort_aborts_while_waiting_for_open_meta_blob() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, pkgfs_install, pkgfs_needs, blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         let abort_fut = proxy.abort();
 
@@ -2261,16 +2029,13 @@
             abort_fut.await,
             Err(fidl::Error::ClientChannelClosed { status: Status::PEER_CLOSED, .. })
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn abort_waits_for_pending_blob_writes_before_responding() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, pkgfs_install, pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         let content_blob = Hash::from([1; 32]);
 
@@ -2346,16 +2111,13 @@
         );
 
         assert_matches!(task.await, Err(ServeNeededBlobsError::Aborted));
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn abort_aborts_while_waiting_for_get_missing_blobs() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, pkgfs_install, pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         write_meta_blob(&proxy, &mut blobfs, meta_blob_info, vec![]).await;
 
@@ -2366,16 +2128,13 @@
             abort_fut.await,
             Err(fidl::Error::ClientChannelClosed { status: Status::PEER_CLOSED, .. })
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn abort_aborts_while_waiting_for_open_blobs() {
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-        let (task, proxy, pkgfs_install, pkgfs_needs, mut blobfs) =
-            spawn_serve_needed_blobs_with_mocks(meta_blob_info);
+        let (task, proxy, mut blobfs) = spawn_serve_needed_blobs_with_mocks(meta_blob_info);
 
         write_meta_blob(&proxy, &mut blobfs, meta_blob_info, vec![[2; 32].into()]).await;
         enumerate_readable_missing_blobs(
@@ -2393,200 +2152,43 @@
             abort_fut.await,
             Err(fidl::Error::ClientChannelClosed { status: Status::PEER_CLOSED, .. })
         );
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
         blobfs.expect_done().await;
     }
 }
 
 #[cfg(test)]
 mod get_handler_tests {
-    use super::serve_needed_blobs_tests::*;
     use super::*;
-    use assert_matches::assert_matches;
-    use fidl_fuchsia_pkg::NeededBlobsProxy;
-    use fuchsia_cobalt::{CobaltConnector, ConnectionType};
-    use fuchsia_inspect as finspect;
-
-    fn spawn_get_with_mocks(
-        meta_blob_info: BlobInfo,
-        dir_request: Option<ServerEnd<fio::DirectoryMarker>>,
-    ) -> (
-        Task<Result<(), Status>>,
-        NeededBlobsProxy,
-        pkgfs::versions::Mock,
-        pkgfs::install::Mock,
-        pkgfs::needs::Mock,
-        blobfs::Mock,
-    ) {
-        let (proxy, stream) = fidl::endpoints::create_proxy::<NeededBlobsMarker>().unwrap();
-
-        let (pkgfs_versions, pkgfs_versions_mock) = pkgfs::versions::Client::new_mock();
-        let (pkgfs_install, pkgfs_install_mock) = pkgfs::install::Client::new_mock();
-        let (pkgfs_needs, pkgfs_needs_mock) = pkgfs::needs::Client::new_mock();
-        let (blobfs, blobfs_mock) = blobfs::Client::new_mock();
-        let inspector = finspect::Inspector::new();
-        let package_index = Arc::new(async_lock::RwLock::new(PackageIndex::new(
-            inspector.root().create_child("test_does_not_use_inspect "),
-        )));
-
-        let (cobalt_sender, _) =
-            CobaltConnector::default().serve(ConnectionType::project_id(metrics::PROJECT_ID));
-
-        (
-            Task::spawn(async move {
-                get(
-                    &pkgfs_versions,
-                    &pkgfs_install,
-                    &pkgfs_needs,
-                    &package_index,
-                    &blobfs,
-                    meta_blob_info,
-                    stream,
-                    dir_request,
-                    cobalt_sender,
-                    &inspector.root().create_child("test-node-name"),
-                )
-                .await
-            }),
-            proxy,
-            pkgfs_versions_mock,
-            pkgfs_install_mock,
-            pkgfs_needs_mock,
-            blobfs_mock,
-        )
-    }
 
     #[fuchsia_async::run_singlethreaded(test)]
     async fn everything_closed() {
         let (_, stream) = fidl::endpoints::create_proxy::<NeededBlobsMarker>().unwrap();
-
         let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-
-        let (pkgfs_versions, _) = pkgfs::versions::Client::new_test();
-        let (pkgfs_install, _) = pkgfs::install::Client::new_test();
-        let (pkgfs_needs, _) = pkgfs::needs::Client::new_test();
         let (blobfs, _) = blobfs::Client::new_test();
-        let inspector = finspect::Inspector::new();
+        let inspector = fuchsia_inspect::Inspector::new();
         let package_index = Arc::new(async_lock::RwLock::new(PackageIndex::new(
             inspector.root().create_child("test_does_not_use_inspect "),
         )));
 
-        let (cobalt_sender, _) =
-            CobaltConnector::default().serve(ConnectionType::project_id(metrics::PROJECT_ID));
-
-        assert_matches!(
+        assert_matches::assert_matches!(
             get(
-                &pkgfs_versions,
-                &pkgfs_install,
-                &pkgfs_needs,
                 &package_index,
+                &BasePackages::new_test_only(HashSet::new(), vec![]),
+                system_image::ExecutabilityRestrictions::DoNotEnforce,
+                &system_image::NonStaticAllowList::empty(),
                 &blobfs,
                 meta_blob_info,
                 stream,
                 None,
-                cobalt_sender,
+                fuchsia_cobalt::CobaltConnector::default()
+                    .serve(fuchsia_cobalt::ConnectionType::project_id(metrics::PROJECT_ID))
+                    .0,
                 &inspector.root().create_child("get")
             )
             .await,
             Err(Status::UNAVAILABLE)
         );
     }
-
-    #[fuchsia_async::run_singlethreaded(test)]
-    async fn trivially_opens_present_package() {
-        let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-
-        let (pkgdir, pkgdir_server_end) =
-            fidl::endpoints::create_proxy::<fio::DirectoryMarker>().unwrap();
-
-        let (task, proxy, mut pkgfs_versions, pkgfs_install, pkgfs_needs, blobfs) =
-            spawn_get_with_mocks(meta_blob_info, Some(pkgdir_server_end));
-
-        pkgfs_versions
-            .expect_open_package([0; 32].into())
-            .await
-            .succeed_open()
-            .await
-            .expect_clone()
-            .await
-            .verify_are_same_channel(pkgdir);
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
-        blobfs.expect_done().await;
-        assert_eq!(task.await, Ok(()));
-        assert_matches!(
-            proxy.take_event_stream().next().await.unwrap(),
-            Err(fidl::Error::ClientChannelClosed { status: Status::OK, .. })
-        );
-    }
-
-    #[fuchsia_async::run_singlethreaded(test)]
-    async fn trivially_opens_present_package_even_if_needed_blobs_is_closed() {
-        let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-
-        let (pkgdir, pkgdir_server_end) =
-            fidl::endpoints::create_proxy::<fio::DirectoryMarker>().unwrap();
-
-        let (task, proxy, mut pkgfs_versions, pkgfs_install, pkgfs_needs, blobfs) =
-            spawn_get_with_mocks(meta_blob_info, Some(pkgdir_server_end));
-
-        drop(proxy);
-
-        pkgfs_versions
-            .expect_open_package([0; 32].into())
-            .await
-            .succeed_open()
-            .await
-            .expect_clone()
-            .await
-            .verify_are_same_channel(pkgdir);
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
-        blobfs.expect_done().await;
-        assert_eq!(task.await, Ok(()));
-    }
-
-    #[fuchsia_async::run_singlethreaded(test)]
-    async fn opens_missing_package_after_writing_blobs() {
-        let meta_blob_info = BlobInfo { blob_id: [0; 32].into(), length: 0 };
-
-        let (pkgdir, pkgdir_server_end) =
-            fidl::endpoints::create_proxy::<fio::DirectoryMarker>().unwrap();
-
-        let (task, proxy, mut pkgfs_versions, mut pkgfs_install, mut pkgfs_needs, mut blobfs) =
-            spawn_get_with_mocks(meta_blob_info, Some(pkgdir_server_end));
-
-        pkgfs_versions.expect_open_package([0; 32].into()).await.fail_open_with_not_found().await;
-
-        write_meta_blob(&proxy, &mut blobfs, meta_blob_info, vec![]).await;
-        enumerate_readable_missing_blobs(
-            &proxy,
-            &mut blobfs,
-            std::iter::empty(),
-            std::iter::empty(),
-        )
-        .await;
-        succeed_poke_pkgfs(&mut pkgfs_install, &mut pkgfs_needs, meta_blob_info).await;
-        assert_matches!(
-            proxy.take_event_stream().next().await.unwrap(),
-            Err(fidl::Error::ClientChannelClosed { status: Status::OK, .. })
-        );
-
-        pkgfs_versions
-            .expect_open_package([0; 32].into())
-            .await
-            .succeed_open()
-            .await
-            .expect_clone()
-            .await
-            .verify_are_same_channel(pkgdir);
-
-        pkgfs_install.expect_done().await;
-        pkgfs_needs.expect_done().await;
-        blobfs.expect_done().await;
-        assert_eq!(task.await, Ok(()));
-    }
 }
 
 #[cfg(test)]
diff --git a/src/sys/pkg/bin/pkg-cache/src/index/dynamic.rs b/src/sys/pkg/bin/pkg-cache/src/index/dynamic.rs
index 6cb1a4a..ab62711 100644
--- a/src/sys/pkg/bin/pkg-cache/src/index/dynamic.rs
+++ b/src/sys/pkg/bin/pkg-cache/src/index/dynamic.rs
@@ -173,8 +173,12 @@
     }
 
     /// Notifies dynamic index that the given package has completed installation.
-    pub fn complete_install(&mut self, package_hash: Hash) -> Result<(), CompleteInstallError> {
-        match self.packages.get_mut(&package_hash) {
+    /// Returns the package's name.
+    pub fn complete_install(
+        &mut self,
+        package_hash: Hash,
+    ) -> Result<PackageName, CompleteInstallError> {
+        let name = match self.packages.get_mut(&package_hash) {
             Some(PackageWithInspect {
                 package: package @ Package::WithMetaFar { .. },
                 package_node,
@@ -194,10 +198,12 @@
                         node: child_node,
                     };
 
+                    let name = path.name().clone();
                     if let Some(previous_package) = self.active_packages.insert(path, package_hash)
                     {
                         self.packages.remove(&previous_package);
                     }
+                    name
                 } else {
                     unreachable!()
                 }
@@ -207,8 +213,8 @@
                     package.map(|pwi| pwi.package.to_owned()),
                 ));
             }
-        }
-        Ok(())
+        };
+        Ok(name)
     }
 
     /// Notifies dynamic index that the given package installation has been canceled.
@@ -508,7 +514,8 @@
             Package::WithMetaFar { path: path.clone(), required_blobs: required_blobs.clone() },
         );
 
-        dynamic_index.complete_install(hash).unwrap();
+        let name = dynamic_index.complete_install(hash).unwrap();
+        assert_eq!(name, *path.name());
         assert_eq!(
             dynamic_index.packages(),
             hashmap! {
diff --git a/src/sys/pkg/bin/pkg-cache/src/index/package.rs b/src/sys/pkg/bin/pkg-cache/src/index/package.rs
index b12f24b..8c03b22 100644
--- a/src/sys/pkg/bin/pkg-cache/src/index/package.rs
+++ b/src/sys/pkg/bin/pkg-cache/src/index/package.rs
@@ -91,12 +91,17 @@
 
     /// Notifies the appropriate indices that the package with the given hash has completed
     /// installation.
-    pub fn complete_install(&mut self, package_hash: Hash) -> Result<(), CompleteInstallError> {
+    /// Returns the package's name if the package was activated in the dynamic index.
+    pub fn complete_install(
+        &mut self,
+        package_hash: Hash,
+    ) -> Result<Option<PackageName>, CompleteInstallError> {
         let is_retained = self.retained.contains_package(&package_hash);
 
         match self.dynamic.complete_install(package_hash) {
-            Err(_) if is_retained => Ok(()),
-            res => res,
+            Err(_) if is_retained => Ok(None),
+            Err(e) => Err(e),
+            Ok(name) => Ok(Some(name)),
         }
     }
 
@@ -150,7 +155,7 @@
 /// Notifies the appropriate inner indices that the given meta far blob is now available in blobfs.
 /// Do not use this for regular blob (unless it's also a meta far).
 pub async fn fulfill_meta_far_blob(
-    index: &Arc<async_lock::RwLock<PackageIndex>>,
+    index: &async_lock::RwLock<PackageIndex>,
     blobfs: &blobfs::Client,
     meta_hash: Hash,
 ) -> Result<HashSet<Hash>, FulfillMetaFarError> {
@@ -218,7 +223,7 @@
         }));
 
         index.fulfill_meta_far(hash(0), path("pending"), hashset! {hash(1)}).unwrap();
-        index.complete_install(hash(0)).unwrap();
+        assert_eq!(index.complete_install(hash(0)).unwrap(), Some("pending".parse().unwrap()));
 
         assert_eq!(
             index.retained.packages(),
@@ -260,8 +265,8 @@
             hash(1) => None,
         }));
 
-        index.complete_install(hash(0)).unwrap();
-        index.complete_install(hash(1)).unwrap();
+        assert_eq!(index.complete_install(hash(0)).unwrap(), Some("withmetafar1".parse().unwrap()));
+        assert_eq!(index.complete_install(hash(1)).unwrap(), Some("withmetafar2".parse().unwrap()));
 
         assert_eq!(
             index.retained.packages(),
@@ -302,7 +307,7 @@
 
         index.start_install(hash(0));
         index.fulfill_meta_far(hash(0), path("retaiendonly"), hashset! {hash(123)}).unwrap();
-        index.complete_install(hash(0)).unwrap();
+        assert_eq!(index.complete_install(hash(0)).unwrap(), None);
 
         assert_eq!(
             index.retained.packages(),
@@ -326,7 +331,7 @@
         // install a package not tracked by the retained index
         index.start_install(hash(2));
         index.fulfill_meta_far(hash(2), path("dynamic-only"), hashset! {hash(10)}).unwrap();
-        index.complete_install(hash(2)).unwrap();
+        assert_eq!(index.complete_install(hash(2)).unwrap(), Some("dynamic-only".parse().unwrap()));
 
         assert_eq!(
             index.retained.packages(),
@@ -423,8 +428,8 @@
         index.fulfill_meta_far(hash(0), path("pkg1"), hashset! {hash(10)}).unwrap();
         index.fulfill_meta_far(hash(1), path("pkg2"), hashset! {hash(11), hash(61)}).unwrap();
 
-        index.complete_install(hash(0)).unwrap();
-        index.complete_install(hash(1)).unwrap();
+        assert_eq!(index.complete_install(hash(0)).unwrap(), Some("pkg1".parse().unwrap()));
+        assert_eq!(index.complete_install(hash(1)).unwrap(), Some("pkg2".parse().unwrap()));
 
         assert_eq!(
             index.all_blobs(),
diff --git a/src/sys/pkg/bin/pkg-cache/src/main.rs b/src/sys/pkg/bin/pkg-cache/src/main.rs
index e0f7748..36d5863 100644
--- a/src/sys/pkg/bin/pkg-cache/src/main.rs
+++ b/src/sys/pkg/bin/pkg-cache/src/main.rs
@@ -61,21 +61,8 @@
     let Args { ignore_system_image } = argh::from_env();
 
     let inspector = finspect::Inspector::new();
-    let index_node = inspector.root().create_child("index");
-
-    let (cobalt_sender, cobalt_fut) = CobaltConnector { buffer_size: COBALT_CONNECTOR_BUFFER_SIZE }
-        .serve(ConnectionType::project_id(metrics::PROJECT_ID));
-    let cobalt_fut = Task::spawn(cobalt_fut);
-
-    let pkgfs_versions =
-        pkgfs::versions::Client::open_from_namespace().context("error opening /pkgfs/versions")?;
-    let pkgfs_install =
-        pkgfs::install::Client::open_from_namespace().context("error opening /pkgfs/install")?;
-    let pkgfs_needs =
-        pkgfs::needs::Client::open_from_namespace().context("error opening /pkgfs/needs")?;
-    let blobfs = blobfs::Client::open_from_namespace().context("error opening blobfs")?;
-
-    let mut package_index = PackageIndex::new(index_node);
+    let mut package_index = PackageIndex::new(inspector.root().create_child("index"));
+    let blobfs = blobfs::Client::open_from_namespace_rwx().context("error opening blobfs")?;
 
     let (
         system_image,
@@ -96,8 +83,6 @@
     } else {
         let boot_args = connect_to_protocol::<fidl_fuchsia_boot::ArgumentsMarker>()
             .context("error connecting to fuchsia.boot/Arguments")?;
-        // TODO(fxbug.dev/88871) Use a blobfs client with RX rights (instead of RW) to create
-        // system_image.
         let system_image = system_image::SystemImage::new(blobfs.clone(), &boot_args)
             .await
             .context("Accessing contents of system_image package")?;
@@ -145,20 +130,24 @@
         .record_child("non_static_allow_list", |n| non_static_allow_list.record_inspect(n));
 
     let base_packages = Arc::new(base_packages);
+    let non_static_allow_list = Arc::new(non_static_allow_list);
     let package_index = Arc::new(async_lock::RwLock::new(package_index));
+    let scope = vfs::execution_scope::ExecutionScope::new();
+    let (cobalt_sender, cobalt_fut) = CobaltConnector { buffer_size: COBALT_CONNECTOR_BUFFER_SIZE }
+        .serve(ConnectionType::project_id(metrics::PROJECT_ID));
+    let cobalt_fut = Task::spawn(cobalt_fut);
 
     // Use VFS to serve the out dir because ServiceFs does not support OPEN_RIGHT_EXECUTABLE and
     // pkgfs/{packages|versions|system} require it.
     let svc_dir = vfs::pseudo_directory! {};
     let cache_inspect_node = inspector.root().create_child("fuchsia.pkg.PackageCache");
     {
-        let pkgfs_versions = pkgfs_versions.clone();
-        let pkgfs_install = pkgfs_install.clone();
-        let pkgfs_needs = pkgfs_needs.clone();
         let package_index = Arc::clone(&package_index);
         let blobfs = blobfs.clone();
         let base_packages = Arc::clone(&base_packages);
         let cache_packages = Arc::new(cache_packages);
+        let non_static_allow_list = Arc::clone(&non_static_allow_list);
+        let scope = scope.clone();
         let cobalt_sender = cobalt_sender.clone();
         let cache_inspect_id = Arc::new(AtomicU32::new(0));
         let cache_get_node = Arc::new(cache_inspect_node.create_child("get"));
@@ -168,13 +157,13 @@
                 fidl_fuchsia_pkg::PackageCacheMarker::PROTOCOL_NAME,
                 vfs::service::host(move |stream: fidl_fuchsia_pkg::PackageCacheRequestStream| {
                     cache_service::serve(
-                        pkgfs_versions.clone(),
-                        pkgfs_install.clone(),
-                        pkgfs_needs.clone(),
                         Arc::clone(&package_index),
                         blobfs.clone(),
-                        base_packages.clone(),
+                        Arc::clone(&base_packages),
                         Arc::clone(&cache_packages),
+                        executability_restrictions,
+                        Arc::clone(&non_static_allow_list),
+                        scope.clone(),
                         stream,
                         cobalt_sender.clone(),
                         Arc::clone(&cache_inspect_id),
@@ -251,10 +240,8 @@
             crate::compat::pkgfs::make_dir(
                 Arc::clone(&base_packages),
                 Arc::clone(&package_index),
-                Arc::new(non_static_allow_list),
+                Arc::clone(&non_static_allow_list),
                 executability_restrictions,
-                // TODO(fxbug.dev/88871) Use a blobfs client with RX rights (instead of RW) to serve
-                // pkgfs.
                 blobfs.clone(),
                 system_image,
             )
@@ -262,7 +249,6 @@
         inspect_runtime::DIAGNOSTICS_DIR => inspect_runtime::create_diagnostics_dir(inspector),
     };
 
-    let scope = vfs::execution_scope::ExecutionScope::new();
     let () = out_dir.open(
         scope.clone(),
         fio::OpenFlags::RIGHT_READABLE
diff --git a/src/sys/pkg/bin/system-update-checker/meta/system_update_checker.core_shard.cml b/src/sys/pkg/bin/system-update-checker/meta/system_update_checker.core_shard.cml
index d7a27c2e..c2ec0e5 100644
--- a/src/sys/pkg/bin/system-update-checker/meta/system_update_checker.core_shard.cml
+++ b/src/sys/pkg/bin/system-update-checker/meta/system_update_checker.core_shard.cml
@@ -40,7 +40,7 @@
         },
         {
             protocol: "fuchsia.space.Manager",
-            from: "#pkg-cache",
+            from: "parent",
             to: "#system-update-checker",
         },
         {
diff --git a/src/sys/pkg/bin/system-updater/meta/system_updater.core_shard.cml b/src/sys/pkg/bin/system-updater/meta/system_updater.core_shard.cml
index 5c744e4..9ba0883 100644
--- a/src/sys/pkg/bin/system-updater/meta/system_updater.core_shard.cml
+++ b/src/sys/pkg/bin/system-updater/meta/system_updater.core_shard.cml
@@ -29,7 +29,7 @@
                 "fuchsia.pkg.RetainedPackages",
                 "fuchsia.space.Manager",
             ],
-            from: "#pkg-cache",
+            from: "parent",
             to: "#system-updater",
         },
         {
diff --git a/src/sys/pkg/lib/blobfs/src/lib.rs b/src/sys/pkg/lib/blobfs/src/lib.rs
index 29a0a24..af558c7 100644
--- a/src/sys/pkg/lib/blobfs/src/lib.rs
+++ b/src/sys/pkg/lib/blobfs/src/lib.rs
@@ -70,6 +70,18 @@
         Ok(Client { proxy })
     }
 
+    /// Returns a client connected to blobfs from the current component's namespace with
+    /// OPEN_RIGHT_READABLE, OPEN_RIGHT_WRITABLE, OPEN_RIGHT_EXECUTABLE.
+    pub fn open_from_namespace_rwx() -> Result<Self, BlobfsError> {
+        let proxy = io_util::directory::open_in_namespace(
+            "/blob",
+            fidl_fuchsia_io::OpenFlags::RIGHT_READABLE
+                | fidl_fuchsia_io::OpenFlags::RIGHT_WRITABLE
+                | fidl_fuchsia_io::OpenFlags::RIGHT_EXECUTABLE,
+        )?;
+        Ok(Client { proxy })
+    }
+
     /// Returns an client connected to blobfs from the given blobfs root dir.
     pub fn new(proxy: fio::DirectoryProxy) -> Self {
         Client { proxy }
diff --git a/src/sys/pkg/lib/fuchsia-pkg-testing/tests/pkgfs_test.rs b/src/sys/pkg/lib/fuchsia-pkg-testing/tests/pkgfs_test.rs
index 81deb0f..03e914f 100644
--- a/src/sys/pkg/lib/fuchsia-pkg-testing/tests/pkgfs_test.rs
+++ b/src/sys/pkg/lib/fuchsia-pkg-testing/tests/pkgfs_test.rs
@@ -1514,9 +1514,10 @@
     let this_pkg_dir =
         io_util::open_directory_in_namespace("/pkg", io_util::OpenFlags::RIGHT_READABLE)
             .expect("opening /pkg");
+
     let (status, flags) = this_pkg_dir.get_flags().await.expect("getting directory flags");
-    assert_eq!(status, Status::NOT_SUPPORTED.into_raw());
-    assert_eq!(flags, fio::OpenFlags::empty());
+    assert_eq!(status, Status::OK.into_raw());
+    assert_eq!(flags, io_util::OpenFlags::RIGHT_READABLE);
 
     // Try get_flags on a file within our package directory.
     // thinfs maps GetFlags to GetFlags, so this should not close the channel.
@@ -1553,7 +1554,7 @@
             .expect("opening /pkg/meta as file");
     let status =
         meta_far_file_proxy.set_flags(fio::OpenFlags::APPEND).await.expect("setting file flags");
-    assert_eq!(status, Status::NOT_SUPPORTED.into_raw());
+    assert_eq!(status, Status::OK.into_raw());
 
     // We should still be able to read our own package directory and read our own merkle root,
     // which means pkgfs hasn't crashed.
diff --git a/src/sys/pkg/lib/isolated-ota/README.md b/src/sys/pkg/lib/isolated-ota/README.md
index 414af30..2f5107f 100644
--- a/src/sys/pkg/lib/isolated-ota/README.md
+++ b/src/sys/pkg/lib/isolated-ota/README.md
@@ -9,16 +9,15 @@
 `//src/sys/pkg/lib/isolated-swd:isolated-swd-components`.
 
 It does this by setting up the software delivery stack:
-1. `pkgfs` is launched against the provided blobfs.
-2. `pkg-cache` is launched, using the `pkgfs` from step 1.
-3. `pkg-resolver` is launched, using the provided repository configuration and
+1. `pkg-cache` is launched against the provided blobfs.
+2. `pkg-resolver` is launched, using the provided repository configuration and
    channel, along with `pkg-cache` from step 2.
-4. If Omaha configuration is provided (an Omaha app id, and a URL to use for the
+3. If Omaha configuration is provided (an Omaha app id, and a URL to use for the
    Omaha server), the `omaha-client` state machine is launched. It performs an
    update check once, and the Omaha state machine calls the system-updater with
    the update package URI returned by Omaha.
-5. If no Omaha configuration is provided, `isolated-ota` launches the system
+4. If no Omaha configuration is provided, `isolated-ota` launches the system
    updater directly, using the default update URL.
-6. The system updater runs an OTA, resolving all of the packages in the new
+5. The system updater runs an OTA, resolving all of the packages in the new
    system using the `pkg-resolver` from step 3, and paving the images in the
    update package using the provided paver.
diff --git a/src/sys/pkg/lib/isolated-ota/src/lib.rs b/src/sys/pkg/lib/isolated-ota/src/lib.rs
index c68578c..ed44631 100644
--- a/src/sys/pkg/lib/isolated-ota/src/lib.rs
+++ b/src/sys/pkg/lib/isolated-ota/src/lib.rs
@@ -5,16 +5,13 @@
 use {
     fidl::endpoints::{ClientEnd, Proxy, ServerEnd},
     fidl_fuchsia_io as fio, fuchsia_async as fasync,
-    isolated_swd::{cache::Cache, omaha, pkgfs::Pkgfs, resolver::Resolver, updater::Updater},
+    isolated_swd::{cache::Cache, omaha, resolver::Resolver, updater::Updater},
     std::sync::Arc,
     thiserror::Error,
 };
 
 #[derive(Debug, Error)]
 pub enum UpdateError {
-    #[error("error launching pkgfs")]
-    PkgfsLaunchError(#[source] anyhow::Error),
-
     #[error("error launching pkg-cache")]
     PkgCacheLaunchError(#[source] anyhow::Error),
 
@@ -76,21 +73,12 @@
             .map_err(|e| UpdateError::FidlError(fidl::Error::AsyncChannel(e)))?,
     );
 
-    let pkgfs =
-        Pkgfs::launch(clone_blobfs(&blobfs_proxy)?).map_err(UpdateError::PkgfsLaunchError)?;
     let cache = Arc::new(
-        Cache::launch(&pkgfs, clone_blobfs(&blobfs_proxy)?)
-            .map_err(UpdateError::PkgCacheLaunchError)?,
+        Cache::launch(clone_blobfs(&blobfs_proxy)?).map_err(UpdateError::PkgCacheLaunchError)?,
     );
     let resolver = Arc::new(
-        Resolver::launch(
-            &pkgfs,
-            Arc::clone(&cache),
-            repository_config_file,
-            channel_name,
-            ssl_cert_dir,
-        )
-        .map_err(UpdateError::PkgResolverLaunchError)?,
+        Resolver::launch(Arc::clone(&cache), repository_config_file, channel_name, ssl_cert_dir)
+            .map_err(UpdateError::PkgResolverLaunchError)?,
     );
 
     let blobfs_clone = clone_blobfs(&blobfs_proxy)?;
diff --git a/src/sys/pkg/lib/isolated-swd/BUILD.gn b/src/sys/pkg/lib/isolated-swd/BUILD.gn
index 122609f..80065fa 100644
--- a/src/sys/pkg/lib/isolated-swd/BUILD.gn
+++ b/src/sys/pkg/lib/isolated-swd/BUILD.gn
@@ -37,7 +37,6 @@
     "//src/sys/pkg/lib/fuchsia-merkle",
     "//src/sys/pkg/lib/fuchsia-pkg-testing",
     "//src/sys/pkg/lib/omaha-client",
-    "//src/sys/pkg/lib/pkgfs",
     "//src/sys/pkg/lib/version",
     "//src/sys/pkg/testing/blobfs-ramdisk",
     "//src/sys/pkg/testing/mock-paver",
@@ -67,7 +66,6 @@
     "src/omaha/install_plan.rs",
     "src/omaha/installer.rs",
     "src/omaha/timer.rs",
-    "src/pkgfs.rs",
     "src/resolver.rs",
     "src/updater.rs",
   ]
@@ -93,7 +91,6 @@
     "//src/connectivity/network/dns:component-legacy",
     "//src/connectivity/network/netstack:component-legacy",
     "//src/storage/bin/blobfs",
-    "//src/sys/pkg/bin/pkgfs:pkgsvr_bin",
     "//src/sys/pkg/bin/pm:pm_bin",
     "//src/sys/pkg/lib/fuchsia-pkg-testing/certs",
     "//src/sys/pkg/tests/pkg-resolver:empty-repo",
diff --git a/src/sys/pkg/lib/isolated-swd/src/cache.rs b/src/sys/pkg/lib/isolated-swd/src/cache.rs
index dfa2b52..ecd3a96 100644
--- a/src/sys/pkg/lib/isolated-swd/src/cache.rs
+++ b/src/sys/pkg/lib/isolated-swd/src/cache.rs
@@ -3,7 +3,6 @@
 // found in the LICENSE file.
 
 use {
-    crate::pkgfs::Pkgfs,
     anyhow::{anyhow, Context, Error},
     fidl::endpoints::ClientEnd,
     fidl_fuchsia_io as fio,
@@ -24,27 +23,25 @@
 
 /// Represents the sandboxed package cache.
 pub struct Cache {
-    _pkg_cache: App,
+    pkg_cache: App,
     pkg_cache_directory: Arc<zx::Channel>,
     _env: NestedEnvironment,
 }
 
 impl Cache {
-    /// Launch the package cache using the given pkgfs and blobfs.
-    pub fn launch(pkgfs: &Pkgfs, blobfs: ClientEnd<fio::DirectoryMarker>) -> Result<Self, Error> {
-        Self::launch_with_components(pkgfs, blobfs, CACHE_URL)
+    /// Launch the package cache using the given blobfs.
+    pub fn launch(blobfs: ClientEnd<fio::DirectoryMarker>) -> Result<Self, Error> {
+        Self::launch_with_components(blobfs, CACHE_URL)
     }
 
     /// Launch the package cache. This is the same as `launch`, but the URL for the cache's
     /// manifest must be provided.
     fn launch_with_components(
-        pkgfs: &Pkgfs,
         blobfs: ClientEnd<fio::DirectoryMarker>,
         cache_url: &str,
     ) -> Result<Self, Error> {
         let mut pkg_cache = AppBuilder::new(cache_url)
             .arg("--ignore-system-image")
-            .add_handle_to_namespace("/pkgfs".to_owned(), pkgfs.root_handle()?.into_handle())
             .add_handle_to_namespace("/blob".to_owned(), blobfs.into_handle());
 
         let mut fs: ServiceFs<ServiceObj<'_, ()>> = ServiceFs::new();
@@ -71,13 +68,19 @@
         let directory = pkg_cache.directory_request().context("getting directory request")?.clone();
         let pkg_cache = pkg_cache.spawn(env.launcher()).context("launching package cache")?;
 
-        Ok(Cache { _pkg_cache: pkg_cache, pkg_cache_directory: directory, _env: env })
+        Ok(Cache { pkg_cache, pkg_cache_directory: directory, _env: env })
     }
 
     pub fn directory_request(&self) -> Arc<fuchsia_zircon::Channel> {
         self.pkg_cache_directory.clone()
     }
 
+    pub fn package_cache_proxy(&self) -> Result<fidl_fuchsia_pkg::PackageCacheProxy, Error> {
+        self.pkg_cache
+            .connect_to_protocol::<fidl_fuchsia_pkg::PackageCacheMarker>()
+            .context("connecting to PackageCache service")
+    }
+
     /// Serve a `CommitStatusProvider` that always says the system is committed.
     async fn serve_commit_status_provider(
         mut stream: CommitStatusProviderRequestStream,
@@ -99,11 +102,11 @@
 }
 
 pub mod for_tests {
-    use {super::*, crate::pkgfs::for_tests::PkgfsForTest};
+    use super::*;
 
     /// This wraps the `Cache` to reduce test boilerplate.
     pub struct CacheForTest {
-        pub pkgfs: PkgfsForTest,
+        pub blobfs: blobfs_ramdisk::BlobfsRamdisk,
         pub cache: Arc<Cache>,
     }
 
@@ -116,14 +119,16 @@
             )
         }
 
-        /// Create a new `Cache` and backing `pkgfs`.
+        /// Create a new `Cache` and backing blobfs.
         pub fn new_with_component(url: &str) -> Result<Self, Error> {
-            let pkgfs = PkgfsForTest::new().context("Launching pkgfs")?;
-            let blobfs = pkgfs.blobfs.root_dir_handle().context("blobfs handle")?;
-            let cache = Cache::launch_with_components(&pkgfs.pkgfs, blobfs, url)
-                .context("launching cache")?;
+            let blobfs = blobfs_ramdisk::BlobfsRamdisk::start().context("starting blobfs")?;
+            let cache = Cache::launch_with_components(
+                blobfs.root_dir_handle().context("blobfs handle")?,
+                url,
+            )
+            .context("launching cache")?;
 
-            Ok(CacheForTest { pkgfs, cache: Arc::new(cache) })
+            Ok(CacheForTest { blobfs, cache: Arc::new(cache) })
         }
     }
 }
@@ -136,11 +141,7 @@
     pub async fn test_cache_handles_sync() {
         let cache = CacheForTest::new().expect("launching cache");
 
-        let proxy = cache
-            .cache
-            ._pkg_cache
-            .connect_to_protocol::<fidl_fuchsia_pkg::PackageCacheMarker>()
-            .expect("connecting to pkg cache service");
+        let proxy = cache.cache.package_cache_proxy().unwrap();
 
         assert_eq!(proxy.sync().await.unwrap(), Ok(()));
     }
@@ -151,7 +152,7 @@
 
         let proxy = cache
             .cache
-            ._pkg_cache
+            .pkg_cache
             .connect_to_protocol::<fidl_fuchsia_space::ManagerMarker>()
             .expect("connecting to space manager service");
 
diff --git a/src/sys/pkg/lib/isolated-swd/src/lib.rs b/src/sys/pkg/lib/isolated-swd/src/lib.rs
index ad64807..2f129f2 100644
--- a/src/sys/pkg/lib/isolated-swd/src/lib.rs
+++ b/src/sys/pkg/lib/isolated-swd/src/lib.rs
@@ -4,6 +4,5 @@
 
 pub mod cache;
 pub mod omaha;
-pub mod pkgfs;
 pub mod resolver;
 pub mod updater;
diff --git a/src/sys/pkg/lib/isolated-swd/src/omaha.rs b/src/sys/pkg/lib/isolated-swd/src/omaha.rs
index ae7ef5f..c5a9313 100644
--- a/src/sys/pkg/lib/isolated-swd/src/omaha.rs
+++ b/src/sys/pkg/lib/isolated-swd/src/omaha.rs
@@ -213,7 +213,7 @@
             .expect("Creating resolver");
 
         install_update_with_http(
-            resolver.cache.pkgfs.blobfs.root_dir_handle().expect("getting blobfs root handle"),
+            resolver.cache.blobfs.root_dir_handle().expect("getting blobfs root handle"),
             ClientEnd::from(client),
             Arc::clone(&resolver.cache.cache),
             Arc::clone(&resolver.resolver),
diff --git a/src/sys/pkg/lib/isolated-swd/src/pkgfs.rs b/src/sys/pkg/lib/isolated-swd/src/pkgfs.rs
deleted file mode 100644
index c15f824..0000000
--- a/src/sys/pkg/lib/isolated-swd/src/pkgfs.rs
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-use {
-    anyhow::{Context, Error},
-    fdio::{SpawnAction, SpawnOptions},
-    fidl::endpoints::ClientEnd,
-    fidl_fuchsia_io as fio,
-    fuchsia_runtime::{HandleInfo, HandleType},
-    fuchsia_zircon as zx,
-    scoped_task::{self, Scoped},
-    std::ffi::CString,
-};
-
-const PKGSVR_PATH: &str = "/pkg/bin/pkgsvr";
-
-/// Represents the sandboxed pkgfs.
-pub struct Pkgfs {
-    _process: Scoped,
-    root: fio::DirectoryProxy,
-}
-
-impl Pkgfs {
-    /// Launch pkgfs using the given blobfs as the backing blob store.
-    pub fn launch(blobfs: ClientEnd<fio::DirectoryMarker>) -> Result<Self, Error> {
-        Pkgfs::launch_with_args(blobfs, true)
-    }
-
-    /// Launch pkgfs using the given blobfs as the backing blob store.
-    /// If enforce_non_static_allowlist is false, will disable the non-static package allowlist
-    /// (for use in tests).
-    fn launch_with_args(
-        blobfs: ClientEnd<fio::DirectoryMarker>,
-        enforce_non_static_allowlist: bool,
-    ) -> Result<Self, Error> {
-        let (proxy, server_end) = fidl::endpoints::create_proxy::<fio::DirectoryMarker>()?;
-        let handle_info = HandleInfo::new(HandleType::User0, 0);
-
-        // we use a scoped_task to prevent the pkgfs hanging around
-        // if our process dies.
-        let pkgsvr = scoped_task::spawn_etc(
-            scoped_task::job_default(),
-            SpawnOptions::CLONE_ALL,
-            &CString::new(PKGSVR_PATH).unwrap(),
-            &[
-                &CString::new(PKGSVR_PATH).unwrap(),
-                &CString::new(format!(
-                    "--enforcePkgfsPackagesNonStaticAllowlist={}",
-                    enforce_non_static_allowlist
-                ))
-                .unwrap(),
-            ],
-            None,
-            &mut [
-                SpawnAction::add_handle(handle_info, server_end.into_channel().into()),
-                SpawnAction::add_namespace_entry(
-                    &CString::new("/blob").unwrap(),
-                    blobfs.into_channel().into(),
-                ),
-            ],
-        )
-        .map_err(|(status, _)| status)
-        .context("spawn_etc failed")?;
-
-        Ok(Pkgfs { _process: pkgsvr, root: proxy })
-    }
-
-    /// Get a handle to the root directory of the pkgfs.
-    pub fn root_handle(&self) -> Result<ClientEnd<fio::DirectoryMarker>, Error> {
-        let (root_clone, server_end) = zx::Channel::create()?;
-        self.root.clone(fio::OpenFlags::CLONE_SAME_RIGHTS, server_end.into())?;
-        Ok(root_clone.into())
-    }
-}
-
-pub mod for_tests {
-    use {
-        super::*,
-        assert_matches::assert_matches,
-        blobfs_ramdisk::BlobfsRamdisk,
-        fuchsia_pkg_testing::Package,
-        pkgfs::{
-            self,
-            install::{BlobCreateError, BlobKind, BlobWriteSuccess},
-        },
-        std::io::Read,
-    };
-
-    /// This wraps `Pkgfs` in order to reduce test boilerplate.
-    pub struct PkgfsForTest {
-        pub blobfs: BlobfsRamdisk,
-        pub pkgfs: Pkgfs,
-    }
-
-    impl PkgfsForTest {
-        /// Launch pkgfs. The pkgsvr binary must be located at /pkg/bin/pkgsvr.
-        pub fn new() -> Result<Self, Error> {
-            let blobfs = BlobfsRamdisk::start().context("starting blobfs")?;
-            let pkgfs = Pkgfs::launch_with_args(
-                blobfs.root_dir_handle().context("getting blobfs root handle")?,
-                false,
-            )
-            .context("launching pkgfs")?;
-            Ok(PkgfsForTest { blobfs, pkgfs })
-        }
-
-        pub fn root_proxy(&self) -> Result<fio::DirectoryProxy, Error> {
-            Ok(self.pkgfs.root_handle()?.into_proxy()?)
-        }
-    }
-
-    /// Install the given package to pkgfs.
-    pub async fn install_package(root: &fio::DirectoryProxy, pkg: &Package) -> Result<(), Error> {
-        let installer =
-            pkgfs::install::Client::open_from_pkgfs_root(root).context("Opening pkgfs")?;
-
-        // install the meta far
-        let mut buf = vec![];
-        pkg.meta_far().unwrap().read_to_end(&mut buf)?;
-        let merkle = pkg.meta_far_merkle_root().to_owned();
-        let (blob, closer) = installer.create_blob(merkle, BlobKind::Package).await?;
-        let blob = blob.truncate(buf.len() as u64).await?;
-        assert_matches!(blob.write(&buf[..]).await, Ok(BlobWriteSuccess::Done));
-        closer.close().await;
-
-        // install the blobs in the package
-        for mut blob in pkg.content_blob_files() {
-            let mut buf = vec![];
-            blob.file.read_to_end(&mut buf).unwrap();
-            let blob_result = match installer.create_blob(blob.merkle, BlobKind::Data).await {
-                Ok((blob, closer)) => Ok(Some((blob, closer))),
-                Err(BlobCreateError::AlreadyExists) => Ok(None),
-                Err(e) => Err(e),
-            }?;
-
-            if let Some((blob, closer)) = blob_result {
-                let blob = blob.truncate(buf.len() as u64).await?;
-                assert_matches!(blob.write(&buf[..]).await, Ok(BlobWriteSuccess::Done));
-                closer.close().await;
-            }
-        }
-        Ok(())
-    }
-}
-
-#[cfg(test)]
-pub mod tests {
-    #[cfg(test)]
-    use fuchsia_pkg_testing::PackageBuilder;
-    use {
-        super::for_tests::{install_package, PkgfsForTest},
-        super::*,
-        fuchsia_async as fasync,
-    };
-
-    #[fasync::run_singlethreaded(test)]
-    pub async fn test_pkgfs_install() -> Result<(), Error> {
-        let pkgfs = PkgfsForTest::new()?;
-
-        let name = "pkgfs_install";
-        let package = PackageBuilder::new(name)
-            .add_resource_at("data/file1", "file with some test data".as_bytes())
-            .add_resource_at("data/file2", "file with some test data".as_bytes())
-            .add_resource_at("data/file3", "third, totally different file".as_bytes())
-            .build()
-            .await
-            .context("Building package")?;
-        install_package(&pkgfs.root_proxy()?, &package).await?;
-
-        let client = pkgfs::packages::Client::open_from_pkgfs_root(&pkgfs.root_proxy()?)?;
-        let dir = client.open_package(name, None).await?;
-        package.verify_contents(&dir.into_proxy()).await.unwrap();
-
-        Ok(())
-    }
-}
diff --git a/src/sys/pkg/lib/isolated-swd/src/resolver.rs b/src/sys/pkg/lib/isolated-swd/src/resolver.rs
index 47ff671..728a202 100644
--- a/src/sys/pkg/lib/isolated-swd/src/resolver.rs
+++ b/src/sys/pkg/lib/isolated-swd/src/resolver.rs
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 
 use {
-    crate::{cache::Cache, pkgfs::Pkgfs},
+    crate::cache::Cache,
     anyhow::{Context, Error},
     fidl_fuchsia_boot::{ArgumentsRequest, ArgumentsRequestStream},
     fidl_fuchsia_pkg::PackageCacheMarker,
@@ -13,7 +13,7 @@
         server::{NestedEnvironment, ServiceFs, ServiceObj},
     },
     fuchsia_syslog::fx_log_err,
-    fuchsia_zircon::{self as zx, HandleBased},
+    fuchsia_zircon::{self as zx},
     futures::prelude::*,
     std::{io::Write, sync::Arc},
 };
@@ -60,14 +60,12 @@
     /// Launch the package resolver using the given components, TUF repo/channel config,
     /// and SSL certificate folder.
     pub fn launch(
-        pkgfs: &Pkgfs,
         cache: Arc<Cache>,
         repo_config: std::fs::File,
         channel: &str,
         ssl_dir: std::fs::File,
     ) -> Result<Self, Error> {
         Resolver::launch_with_components(
-            pkgfs,
             cache,
             repo_config,
             Some(channel.to_owned()),
@@ -79,7 +77,6 @@
     /// Launch the package resolver. This is the same as `launch`, but the URL for the
     /// resolver's manifest must be provided.
     fn launch_with_components(
-        pkgfs: &Pkgfs,
         cache: Arc<Cache>,
         repo_config: std::fs::File,
         channel: Option<String>,
@@ -87,7 +84,6 @@
         resolver_url: &str,
     ) -> Result<Self, Error> {
         let mut pkg_resolver = AppBuilder::new(resolver_url)
-            .add_handle_to_namespace("/pkgfs".to_owned(), pkgfs.root_handle()?.into_handle())
             .add_dir_to_namespace("/config/data/repositories".to_owned(), repo_config)?
             .add_dir_to_namespace(SSL_CERTS_PATH.to_owned(), ssl_dir)?;
 
@@ -202,7 +198,6 @@
             let ssl_certs =
                 std::fs::File::open(SSL_TEST_CERTS_PATH).context("opening ssl certificates dir")?;
             let resolver = Resolver::launch_with_components(
-                &cache.pkgfs.pkgfs,
                 Arc::clone(&cache.cache),
                 repo_dir,
                 channel,
diff --git a/src/sys/pkg/lib/isolated-swd/src/updater.rs b/src/sys/pkg/lib/isolated-swd/src/updater.rs
index f906dd7a..b60ced7 100644
--- a/src/sys/pkg/lib/isolated-swd/src/updater.rs
+++ b/src/sys/pkg/lib/isolated-swd/src/updater.rs
@@ -201,7 +201,6 @@
         fuchsia_url::pkg_url::RepoUrl,
         fuchsia_zircon as zx,
         mock_paver::{MockPaverService, MockPaverServiceBuilder, PaverEvent},
-        pkgfs,
         std::collections::HashMap,
     };
 
@@ -353,7 +352,7 @@
             fasync::Task::spawn(fs.collect()).detach();
 
             let mut updater = Updater::launch_with_components(
-                resolver.cache.pkgfs.blobfs.root_dir_handle().expect("getting blobfs root handle"),
+                resolver.cache.blobfs.root_dir_handle().expect("getting blobfs root handle"),
                 ClientEnd::from(client),
                 Arc::clone(&resolver.cache.cache),
                 Arc::clone(&resolver.resolver),
@@ -390,14 +389,13 @@
             for package in self.packages.iter() {
                 // we deliberately avoid the package resolver here,
                 // as we want to make sure that the system-updater retrieved all the correct blobs.
-                let client = pkgfs::packages::Client::open_from_pkgfs_root(
-                    &self.resolver.cache.pkgfs.root_proxy()?,
+                let dir = fidl_fuchsia_pkg_ext::cache::Client::from_proxy(
+                    self.resolver.cache.cache.package_cache_proxy()?,
                 )
-                .context("opening pkgfs")?;
-                let dir = client
-                    .open_package(package.name().as_ref(), None)
-                    .await
-                    .context("opening package")?;
+                .open((*package.meta_far_merkle_root()).into())
+                .await
+                .context("opening package")?;
+
                 package
                     .verify_contents(&dir.into_proxy())
                     .await
diff --git a/src/sys/pkg/lib/package-directory/src/meta_as_dir.rs b/src/sys/pkg/lib/package-directory/src/meta_as_dir.rs
index 23631c6..1294130 100644
--- a/src/sys/pkg/lib/package-directory/src/meta_as_dir.rs
+++ b/src/sys/pkg/lib/package-directory/src/meta_as_dir.rs
@@ -138,7 +138,7 @@
             mode: fio::MODE_TYPE_DIRECTORY
                 | vfs::common::rights_to_posix_mode_bits(
                     true,  // read
-                    false, // write
+                    true,  // write
                     true,  // execute
                 ),
             id: 1,
@@ -373,7 +373,7 @@
         assert_eq!(
             Directory::get_attrs(&meta_as_dir).await.unwrap(),
             fio::NodeAttributes {
-                mode: fio::MODE_TYPE_DIRECTORY | 0o500,
+                mode: fio::MODE_TYPE_DIRECTORY | 0o700,
                 id: 1,
                 content_size: 4,
                 storage_size: 4,
diff --git a/src/sys/pkg/lib/package-directory/src/meta_as_file.rs b/src/sys/pkg/lib/package-directory/src/meta_as_file.rs
index 2a5ecc7..d13bc64 100644
--- a/src/sys/pkg/lib/package-directory/src/meta_as_file.rs
+++ b/src/sys/pkg/lib/package-directory/src/meta_as_file.rs
@@ -114,7 +114,7 @@
             mode: fio::MODE_TYPE_FILE
                 | vfs::common::rights_to_posix_mode_bits(
                     true,  // read
-                    false, // write
+                    true,  // write
                     false, // execute
                 ),
             id: 1,
@@ -359,7 +359,7 @@
         assert_eq!(
             File::get_attrs(&meta_as_file).await,
             Ok(fio::NodeAttributes {
-                mode: fio::MODE_TYPE_FILE | 0o400,
+                mode: fio::MODE_TYPE_FILE | 0o600,
                 id: 1,
                 content_size: 64,
                 storage_size: 64,
diff --git a/src/sys/pkg/lib/package-directory/src/meta_file.rs b/src/sys/pkg/lib/package-directory/src/meta_file.rs
index bc9f4af..dfea693 100644
--- a/src/sys/pkg/lib/package-directory/src/meta_file.rs
+++ b/src/sys/pkg/lib/package-directory/src/meta_file.rs
@@ -201,7 +201,7 @@
             mode: fio::MODE_TYPE_FILE
                 | vfs::common::rights_to_posix_mode_bits(
                     true,  // read
-                    false, // write
+                    true,  // write
                     false, // execute
                 ),
             id: 1,
@@ -528,7 +528,7 @@
         assert_eq!(
             File::get_attrs(&meta_file).await,
             Ok(fio::NodeAttributes {
-                mode: fio::MODE_TYPE_FILE | 0o400,
+                mode: fio::MODE_TYPE_FILE | 0o600,
                 id: 1,
                 content_size: meta_file.location.length,
                 storage_size: meta_file.location.length,
diff --git a/src/sys/pkg/lib/package-directory/src/meta_subdir.rs b/src/sys/pkg/lib/package-directory/src/meta_subdir.rs
index 8d3fbbd..caaab70 100644
--- a/src/sys/pkg/lib/package-directory/src/meta_subdir.rs
+++ b/src/sys/pkg/lib/package-directory/src/meta_subdir.rs
@@ -143,7 +143,7 @@
             mode: fio::MODE_TYPE_DIRECTORY
                 | vfs::common::rights_to_posix_mode_bits(
                     true,  // read
-                    false, // write
+                    true,  // write
                     true,  // execute
                 ),
             id: 1,
@@ -358,7 +358,7 @@
         assert_eq!(
             Directory::get_attrs(&sub_dir).await.unwrap(),
             fio::NodeAttributes {
-                mode: fio::MODE_TYPE_DIRECTORY | 0o500,
+                mode: fio::MODE_TYPE_DIRECTORY | 0o700,
                 id: 1,
                 content_size: 4,
                 storage_size: 4,
diff --git a/src/sys/pkg/lib/package-directory/src/non_meta_subdir.rs b/src/sys/pkg/lib/package-directory/src/non_meta_subdir.rs
index 5c05476..92770ad 100644
--- a/src/sys/pkg/lib/package-directory/src/non_meta_subdir.rs
+++ b/src/sys/pkg/lib/package-directory/src/non_meta_subdir.rs
@@ -144,7 +144,7 @@
             mode: fio::MODE_TYPE_DIRECTORY
                 | vfs::common::rights_to_posix_mode_bits(
                     true,  // read
-                    false, // write
+                    true,  // write
                     true,  // execute
                 ),
             id: 1,
@@ -202,7 +202,7 @@
         assert_eq!(
             sub_dir.get_attrs().await.unwrap(),
             fio::NodeAttributes {
-                mode: fio::MODE_TYPE_DIRECTORY | 0o500,
+                mode: fio::MODE_TYPE_DIRECTORY | 0o700,
                 id: 1,
                 content_size: 0,
                 storage_size: 0,
diff --git a/src/sys/pkg/lib/package-directory/src/root_dir.rs b/src/sys/pkg/lib/package-directory/src/root_dir.rs
index ae4baf9..d0bec90 100644
--- a/src/sys/pkg/lib/package-directory/src/root_dir.rs
+++ b/src/sys/pkg/lib/package-directory/src/root_dir.rs
@@ -321,7 +321,7 @@
             mode: fio::MODE_TYPE_DIRECTORY
                 | vfs::common::rights_to_posix_mode_bits(
                     true,  // read
-                    false, // write
+                    true,  // write
                     true,  // execute
                 ),
             id: 1,
@@ -486,7 +486,7 @@
         assert_eq!(
             Directory::get_attrs(&root_dir).await.unwrap(),
             fio::NodeAttributes {
-                mode: fio::MODE_TYPE_DIRECTORY | 0o500,
+                mode: fio::MODE_TYPE_DIRECTORY | 0o700,
                 id: 1,
                 content_size: 0,
                 storage_size: 0,
diff --git a/src/sys/pkg/tests/pkg-cache/src/executability_enforcement.rs b/src/sys/pkg/tests/pkg-cache/src/executability_enforcement.rs
index 11334cd..6c882ea 100644
--- a/src/sys/pkg/tests/pkg-cache/src/executability_enforcement.rs
+++ b/src/sys/pkg/tests/pkg-cache/src/executability_enforcement.rs
@@ -4,6 +4,7 @@
 
 use {
     crate::TestEnv,
+    assert_matches::assert_matches,
     blobfs_ramdisk::BlobfsRamdisk,
     fidl_fuchsia_io as fio,
     fuchsia_pkg_testing::{Package, PackageBuilder, SystemImageBuilder},
@@ -66,10 +67,10 @@
         .await;
     }
 
-    async fn verify_flags(dir: &fio::DirectoryProxy, _expected_flags: fio::OpenFlags) {
-        let (status, _flags) = dir.get_flags().await.unwrap();
-        // TODO(fxbug.dev/88871) Assert the expected flags once `get_flags` is supported.
-        assert_eq!(status, Status::NOT_SUPPORTED.into_raw());
+    async fn verify_flags(dir: &fio::DirectoryProxy, expected_flags: fio::OpenFlags) {
+        let (status, flags) = dir.get_flags().await.unwrap();
+        let () = Status::ok(status).unwrap();
+        assert_eq!(flags, expected_flags);
     }
 
     // Verify Get flags
@@ -79,9 +80,7 @@
     // Verify Open flags
     let open_res = env.open_package(&pkg.meta_far_merkle_root().to_string()).await;
     let () = match is_retained {
-        // TODO(fxbug.dev/88871) After the pkgfs migration, retained packages should
-        // fail to open with NOT_FOUND.
-        IsRetained::True => verify_flags(&open_res.unwrap(), expected_flags).await,
+        IsRetained::True => assert_matches!(open_res, Err(status) if status == Status::NOT_FOUND),
         IsRetained::False => verify_flags(&open_res.unwrap(), expected_flags).await,
     };
 
diff --git a/src/sys/pkg/tests/pkg-cache/src/lib.rs b/src/sys/pkg/tests/pkg-cache/src/lib.rs
index e777cdb..7465723 100644
--- a/src/sys/pkg/tests/pkg-cache/src/lib.rs
+++ b/src/sys/pkg/tests/pkg-cache/src/lib.rs
@@ -456,7 +456,9 @@
                         Capability::directory("pkgfs").path("/pkgfs").rights(fio::RW_STAR_DIR),
                     )
                     .capability(
-                        Capability::directory("blob").path("/blob").rights(fio::RW_STAR_DIR),
+                        Capability::directory("blob-exec")
+                            .path("/blob")
+                            .rights(fio::RW_STAR_DIR | fio::Operations::EXECUTE),
                     )
                     .from(&service_reflector)
                     .to(&pkg_cache),
diff --git a/src/sys/pkg/tests/pkg-resolver/src/lib.rs b/src/sys/pkg/tests/pkg-resolver/src/lib.rs
index 8c3e5e5..5e0077a 100644
--- a/src/sys/pkg/tests/pkg-resolver/src/lib.rs
+++ b/src/sys/pkg/tests/pkg-resolver/src/lib.rs
@@ -647,7 +647,9 @@
                         Capability::directory("pkgfs").path("/pkgfs").rights(fio::RW_STAR_DIR),
                     )
                     .capability(
-                        Capability::directory("blob").path("/blob").rights(fio::RW_STAR_DIR),
+                        Capability::directory("blob-exec")
+                            .path("/blob")
+                            .rights(fio::RW_STAR_DIR | fio::Operations::EXECUTE),
                     )
                     .from(&service_reflector)
                     .to(&pkg_cache),
diff --git a/src/sys/pkg/tests/pkgdir/README.md b/src/sys/pkg/tests/pkgdir/README.md
index a9e3615..2323851 100644
--- a/src/sys/pkg/tests/pkgdir/README.md
+++ b/src/sys/pkg/tests/pkgdir/README.md
@@ -201,11 +201,10 @@
 messages on the cloned channel and isn't even guaranteed to close its end of the
 channel.
 
-### mode protection write bit and group and other bytes not set
+### mode protection group and other bytes not set
 
-When `GetAttrs()` is called, package-directory always returns `0o500` for
-directories and `0o400` for files (directories can be read and traversed, files
-can be read, and
+When `GetAttrs()` is called, package-directory always returns `0o700` for
+directories and `0o600` for files (
 [zxio only uses the USR byte](https://cs.opensource.google/fuchsia/fuchsia/+/main:sdk/lib/zxio/remote.cc;l=339;drc=e3ffcb20a4605ae4299ce5888fce6becabe2f8a9))
 for the
 [mode protection bits](https://cs.opensource.google/fuchsia/fuchsia/+/main:sdk/fidl/fuchsia.io/directory.fidl;l=97;drc=1cc9164ebb39d1c4b070e23f3808216403fcb526)
diff --git a/src/sys/pkg/tests/pkgdir/src/node.rs b/src/sys/pkg/tests/pkgdir/src/node.rs
index b2bf67e..d0b258a 100644
--- a/src/sys/pkg/tests/pkgdir/src/node.rs
+++ b/src/sys/pkg/tests/pkgdir/src/node.rs
@@ -88,8 +88,8 @@
             open_flags: fio::OpenFlags::RIGHT_READABLE | fio::OpenFlags::RIGHT_EXECUTABLE,
             expected_mode: fio::MODE_TYPE_DIRECTORY
                 | if source.is_pkgdir() {
-                    // "mode protection write bit and group and other bytes not set"
-                    0o500
+                    // "mode protection group and other bytes not set"
+                    0o700
                 } else {
                     0o755
                 },
@@ -109,8 +109,8 @@
         Args {
             expected_mode: fio::MODE_TYPE_DIRECTORY
                 | if source.is_pkgdir() {
-                    // "mode protection write bit and group and other bytes not set"
-                    0o500
+                    // "mode protection group and other bytes not set"
+                    0o700
                 } else {
                     0o755
                 },
@@ -145,8 +145,8 @@
             open_mode: fio::MODE_TYPE_FILE,
             expected_mode: fio::MODE_TYPE_FILE
                 | if source.is_pkgdir() {
-                    // "mode protection write bit and group and other bytes not set"
-                    0o400
+                    // "mode protection group and other bytes not set"
+                    0o600
                 } else {
                     0o644
                 },
@@ -169,8 +169,8 @@
             open_mode: fio::MODE_TYPE_DIRECTORY,
             expected_mode: fio::MODE_TYPE_DIRECTORY
                 | if source.is_pkgdir() {
-                    // "mode protection write bit and group and other bytes not set"
-                    0o500
+                    // "mode protection group and other bytes not set"
+                    0o700
                 } else {
                     0o755
                 },
@@ -192,8 +192,8 @@
         Args {
             expected_mode: fio::MODE_TYPE_DIRECTORY
                 | if source.is_pkgdir() {
-                    // "mode protection write bit and group and other bytes not set"
-                    0o500
+                    // "mode protection group and other bytes not set"
+                    0o700
                 } else {
                     0o755
                 },
@@ -215,8 +215,8 @@
         Args {
             expected_mode: fio::MODE_TYPE_FILE
                 | if source.is_pkgdir() {
-                    // "mode protection write bit and group and other bytes not set"
-                    0o400
+                    // "mode protection group and other bytes not set"
+                    0o600
                 } else {
                     0o644
                 },
diff --git a/src/sys/root/root-base.shard.cml b/src/sys/root/root-base.shard.cml
index 9375681..ce9d84f 100644
--- a/src/sys/root/root-base.shard.cml
+++ b/src/sys/root/root-base.shard.cml
@@ -128,12 +128,15 @@
                 "fuchsia.logger.Log",
                 "fuchsia.logger.LogSink",
                 "fuchsia.paver.Paver",
+                "fuchsia.pkg.PackageCache",
+                "fuchsia.pkg.RetainedPackages",
                 "fuchsia.power.button.Monitor",
                 "fuchsia.power.clientlevel.Connector",
                 "fuchsia.power.profile.Watcher",
                 "fuchsia.power.systemmode.ClientConfigurator",
                 "fuchsia.power.systemmode.Requester",
                 "fuchsia.scheduler.ProfileProvider",
+                "fuchsia.space.Manager",
                 "fuchsia.sysinfo.SysInfo",
                 "fuchsia.sysmem.Allocator",
                 "fuchsia.thermal.ClientStateConnector",
@@ -175,6 +178,7 @@
                 "fuchsia.tracing.controller.Controller",
                 "fuchsia.tracing.provider.Registry",
                 "fuchsia.ui.activity.Provider",
+                "fuchsia.update.CommitStatusProvider",
                 "fuchsia.virtualization.Manager",
             ],
             from: "#core",
diff --git a/src/sys/sysmgr/BUILD.gn b/src/sys/sysmgr/BUILD.gn
index 063c082..bf10d04 100644
--- a/src/sys/sysmgr/BUILD.gn
+++ b/src/sys/sysmgr/BUILD.gn
@@ -95,10 +95,6 @@
   name = "router.config"
 }
 
-cfg("sshd_host_config") {
-  name = "sshd_host.config"
-}
-
 config("update_packages_config") {
   defines = [ "AUTO_UPDATE_PACKAGES" ]
 }
diff --git a/src/sys/sysmgr/config/sshd_host.config b/src/sys/sysmgr/config/sshd_host.config
deleted file mode 100644
index 4ffe61d..0000000
--- a/src/sys/sysmgr/config/sshd_host.config
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-  "apps": [
-    "fuchsia-pkg://fuchsia.com/sshd-host#meta/sshd-host.cmx"
-  ]
-}
diff --git a/tools/cmc/build/restricted_features/BUILD.gn b/tools/cmc/build/restricted_features/BUILD.gn
index 11e5f7f..1d54429 100644
--- a/tools/cmc/build/restricted_features/BUILD.gn
+++ b/tools/cmc/build/restricted_features/BUILD.gn
@@ -74,7 +74,7 @@
     "//vendor/google/sessions:*",
 
     # tests
-    "//src/lib/assembly/structured_config:*",
+    "//src/lib/assembly/structured_config/tests/*:*",
     "//src/lib/component_hub/tests:*",
     "//src/lib/fuchsia-component-test/tests/echo_client_sc:*",
     "//src/sys/component_manager/tests/hub:*",
diff --git a/zircon/kernel/vm/include/vm/page_source.h b/zircon/kernel/vm/include/vm/page_source.h
index 7f8d100..14af77e 100644
--- a/zircon/kernel/vm/include/vm/page_source.h
+++ b/zircon/kernel/vm/include/vm/page_source.h
@@ -342,6 +342,8 @@
   mutable DECLARE_MUTEX(PageSource) page_source_mtx_;
   bool detached_ TA_GUARDED(page_source_mtx_) = false;
   bool closed_ TA_GUARDED(page_source_mtx_) = false;
+  // We cache the immutable page_provider_->properties() to avoid many virtual calls.
+  const PageSourceProperties page_provider_properties_;
 
   // Trees of outstanding requests which have been sent to the PageProvider, one for each supported
   // page request type. These lists are keyed by the end offset of the requests (not the start
@@ -357,8 +359,6 @@
   // PageProvider instance that will provide pages asynchronously (e.g. a userspace pager, see
   // PagerProxy for details).
   const fbl::RefPtr<PageProvider> page_provider_;
-  // We cache the immutable page_provider_->properties() to avoid many virtual calls.
-  const PageSourceProperties page_provider_properties_;
 
   // Helper that adds page at |offset| to |request| and potentially forwards it to the provider.
   // |request| must already be initialized. |offset| must be page-aligned.
diff --git a/zircon/kernel/vm/page_source.cc b/zircon/kernel/vm/page_source.cc
index 744c05b..e5c4824 100644
--- a/zircon/kernel/vm/page_source.cc
+++ b/zircon/kernel/vm/page_source.cc
@@ -17,8 +17,8 @@
 #define LOCAL_TRACE 0
 
 PageSource::PageSource(fbl::RefPtr<PageProvider>&& page_provider)
-    : page_provider_(ktl::move(page_provider)),
-      page_provider_properties_(page_provider_->properties()) {
+    : page_provider_properties_(page_provider->properties()),
+      page_provider_(ktl::move(page_provider)) {
   LTRACEF("%p\n", this);
 }
 
diff --git a/zircon/system/ulib/fidl/BUILD.gn b/zircon/system/ulib/fidl/BUILD.gn
index f4ceb5f..e8d5837 100644
--- a/zircon/system/ulib/fidl/BUILD.gn
+++ b/zircon/system/ulib/fidl/BUILD.gn
@@ -79,7 +79,7 @@
 
     # TODO(fxbug.dev/80525): Change this back to "partner" post API stability.
     sdk_publishable = "experimental"
-    sdk_name = "fidl-llcpp-experimental-driver-only"
+    sdk_name = "fidl-llcpp"
     sdk_headers = [
                     "lib/fidl/llcpp/array.h",
                     "lib/fidl/llcpp/async_binding.h",
diff --git a/zircon/system/ulib/zxc/BUILD.gn b/zircon/system/ulib/zxc/BUILD.gn
index edad92c..a5306ac 100644
--- a/zircon/system/ulib/zxc/BUILD.gn
+++ b/zircon/system/ulib/zxc/BUILD.gn
@@ -5,8 +5,8 @@
 import("//build/zircon/migrated_targets.gni")
 
 sdk_source_set("fitx") {
-  category = "partner"
-  sdk_name = "fitx-experimental-driver-only"
+  category = "experimental"
+  sdk_name = "fitx"
   sources = [
     "include/lib/fitx/internal/compiler.h",
     "include/lib/fitx/internal/result.h",
@@ -15,8 +15,8 @@
 }
 
 sdk_source_set("zx") {
-  category = "partner"
-  sdk_name = "zx-experimental-driver-only"
+  category = "experimental"
+  sdk_name = "zx-status"
   sources = [ "include/lib/zx/status.h" ]
   public_deps = [ ":fitx" ]
   if (zircon_toolchain != false) {
@@ -27,8 +27,7 @@
 zx_library("zxc") {
   sdk = "source"
 
-  sdk_publishable = true
-  sdk_name = "zxc-experimental-driver-only"
+  sdk_publishable = "experimental"
   sdk_headers = [
     "lib/fitx/internal/compiler.h",
     "lib/fitx/internal/result.h",
diff --git a/zircon/system/ulib/zxc/fitx-experimental-driver-only.api b/zircon/system/ulib/zxc/fitx-experimental-driver-only.api
deleted file mode 100644
index adee59c..0000000
--- a/zircon/system/ulib/zxc/fitx-experimental-driver-only.api
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-  "pkg/fitx-experimental-driver-only/include/lib/fitx/internal/compiler.h": "3b050f1cec235f382982fbee11ad9a26",
-  "pkg/fitx-experimental-driver-only/include/lib/fitx/internal/result.h": "f56b1896a3f2e1f94d30700dc5083066",
-  "pkg/fitx-experimental-driver-only/include/lib/fitx/result.h": "914de85b14b2c20d12bc1a53d0033fd9"
-}
\ No newline at end of file
diff --git a/zircon/system/ulib/zxc/zx-experimental-driver-only.api b/zircon/system/ulib/zxc/zx-experimental-driver-only.api
deleted file mode 100644
index cdb2764..0000000
--- a/zircon/system/ulib/zxc/zx-experimental-driver-only.api
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-  "pkg/zx-experimental-driver-only/include/lib/zx/status.h": "36bbdeaf15958596284fba6774e94ef2"
-}
\ No newline at end of file