[pkg] Delete pkgfs

Removes pkgfs from the set of base packages.
Removes pkgfs/pkgsvr code from the Go Grand Unified Binary.

Fixed: 88872
Fixed: 102162
Change-Id: Ic101a7f0e682e5568949ce9641bbeca23e480bdc
Reviewed-on: https://fuchsia-review.googlesource.com/c/fuchsia/+/691908
Reviewed-by: Tamir Duberstein <tamird@google.com>
Reviewed-by: Aidan Wolter <awolter@google.com>
Commit-Queue: Ben Keller <galbanum@google.com>
Reviewed-by: Kevin Wells <kevinwells@google.com>
Reviewed-by: Mark Dittmer <markdittmer@google.com>
Size-Review: Saman Sami <samans@google.com>
diff --git a/build/assembly/assembled_system.gni b/build/assembly/assembled_system.gni
index 28e6b9d..ee12459 100644
--- a/build/assembly/assembled_system.gni
+++ b/build/assembly/assembled_system.gni
@@ -84,8 +84,7 @@
 #
 #   devmgr_config (default: [])
 #     [list of strings] List of arguments to add to /boot/config/devmgr.
-#     These arguments come after synthesized arguments to configure blobfs and
-#     pkgfs.
+#     These arguments come after synthesized arguments to configure blobfs.
 #
 #   fshost_config (default: {})
 #     [scope] Arguments to add to fshost's configuration. These
@@ -114,10 +113,6 @@
 #     should be "system_image_prime", so that the package does not override the
 #     non-prime package during publishing.
 #
-#   pkgfs_package_label (optional; default: //src/sys/pkg/bin/pkgfs/pkgfs)
-#     [GN label] The pkgfs implementation to use, for those assemblies that
-#     require one.
-#
 # Board parameters:
 #
 #   assembly_compress_blobs (default: true)
@@ -236,7 +231,6 @@
     config_data = "${image_name}.config-data"
     shell_commands = "${image_name}.shell_commands"
     fshost_config = "${image_name}.fshost_config"
-    pkgfs_package_label = "//src/sys/pkg/bin/pkgfs"
     compare_command_logs = "${image_name}.compare_command_logs"
     compare_images_manifests = "${image_name}.compare_images_manifests"
     platform_aib = "//build/assembly"
@@ -494,13 +488,6 @@
     if (generate_fvm) {
       base_packages = _base_packages
       base_packages += _meta_packages
-
-      if (defined(invoker.pkgfs_package_label)) {
-        base_packages += [ invoker.pkgfs_package_label ]
-      } else {
-        base_packages += [ labels.pkgfs_package_label ]
-      }
-
       cache_packages = _cache_packages
     }
 
@@ -518,7 +505,7 @@
     bootfs_labels += [ "//zircon/kernel" ]
     bootfs_labels += [ ":${labels.fshost_config}" ]
 
-    # Only builds with an FVM need base-resolver for pkgfs.
+    # Only builds with an FVM need base-resolver/pkg-cache-resolver.
     if (generate_fvm) {
       bootfs_labels += [ "//src/sys/base-resolver:bootfs" ]
     }
diff --git a/src/developer/ffx/plugins/assembly/src/zbi.rs b/src/developer/ffx/plugins/assembly/src/zbi.rs
index 8f0d3f7..50520c0 100644
--- a/src/developer/ffx/plugins/assembly/src/zbi.rs
+++ b/src/developer/ffx/plugins/assembly/src/zbi.rs
@@ -4,7 +4,7 @@
 
 use crate::base_package::BasePackage;
 
-use anyhow::{anyhow, Context, Result};
+use anyhow::{anyhow, Result};
 use assembly_config::ImageAssemblyConfig;
 use assembly_images_config::{Zbi, ZbiCompression};
 use assembly_images_manifest::{Image, ImagesManifest};
@@ -49,27 +49,10 @@
         zbi_builder.add_boot_arg("devmgr.require-system=true");
 
         // Specify how to launch pkgfs: bin/pkgsvr <base-merkle>
+        // This is still needed even though pkgfs has been removed because pkg-cache and
+        // pkg-cache-resolver use it to obtain the base_package hash.
         zbi_builder
             .add_boot_arg(&format!("zircon.system.pkgfs.cmd=bin/pkgsvr+{}", &base_package.merkle));
-
-        // Add the pkgfs blobs to the boot arguments, so that pkgfs can be bootstrapped out of blobfs,
-        // before the blobfs service is available.
-        let pkgfs_manifest: PackageManifest = product
-            .base
-            .iter()
-            .find_map(|p| {
-                if let Ok(m) = PackageManifest::try_load_from(p) {
-                    if m.name().as_ref() == "pkgfs" {
-                        return Some(m);
-                    }
-                }
-                return None;
-            })
-            .context("Failed to find pkgfs in the base packages")?;
-
-        pkgfs_manifest.into_blobs().into_iter().filter(|b| b.path != "meta/").for_each(|b| {
-            zbi_builder.add_boot_arg(&format!("zircon.system.pkgfs.file.{}={}", b.path, b.merkle));
-        });
     }
 
     // Add the command line.
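
Note on the retained boot argument: the zbi.rs hunk above keeps "zircon.system.pkgfs.cmd=bin/pkgsvr+<merkle>" even though pkgfs itself is deleted, because pkg-cache and pkg-cache-resolver still read the base package hash out of that value. The sketch below shows, under that assumption, how a consumer could parse it; the function name base_package_merkle and the sample merkle are hypothetical and do not mirror the actual Fuchsia implementation.

// Minimal sketch (not Fuchsia source): recover the base package merkle root
// from a zircon.system.pkgfs.cmd value of the form "bin/pkgsvr+<merkle>".
fn base_package_merkle(pkgfs_cmd: &str) -> Option<&str> {
    // Everything after the first '+' is the hex merkle root of the base
    // package's meta.far; the prefix is the (now unused) pkgsvr binary path.
    let (_binary, merkle) = pkgfs_cmd.split_once('+')?;
    // Fuchsia merkle roots are 64 hex characters.
    if merkle.len() == 64 && merkle.chars().all(|c| c.is_ascii_hexdigit()) {
        Some(merkle)
    } else {
        None
    }
}

fn main() {
    // Hypothetical 64-character merkle root, for demonstration only.
    let merkle = "facefacefaceface".repeat(4);
    let value = format!("bin/pkgsvr+{}", merkle);
    match base_package_merkle(&value) {
        Some(m) => println!("base package merkle: {}", m),
        None => println!("malformed zircon.system.pkgfs.cmd value"),
    }
}
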
diff --git a/src/go/grand_unified_binary/BUILD.gn b/src/go/grand_unified_binary/BUILD.gn
index 6fc326a..5501a8f 100644
--- a/src/go/grand_unified_binary/BUILD.gn
+++ b/src/go/grand_unified_binary/BUILD.gn
@@ -6,10 +6,7 @@
 import("//build/go/go_library.gni")
 
 go_library("lib") {
-  deps = [
-    "//src/connectivity/network/netstack:lib",
-    "//src/sys/pkg/bin/pkgfs:pmd",
-  ]
+  deps = [ "//src/connectivity/network/netstack:lib" ]
   sources = [ "gub.go" ]
 }
 
diff --git a/src/go/grand_unified_binary/gub.go b/src/go/grand_unified_binary/gub.go
index 8bdcf81..7054519 100644
--- a/src/go/grand_unified_binary/gub.go
+++ b/src/go/grand_unified_binary/gub.go
@@ -18,7 +18,6 @@
 	"strings"
 
 	"go.fuchsia.dev/fuchsia/src/connectivity/network/netstack"
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pkgfs/pkgsvr"
 )
 
 func main() {
@@ -29,8 +28,6 @@
 	name := filepath.Base(os.Args[0])
 	name = strings.SplitN(name, ".", 2)[0]
 	switch name {
-	case "pkgsvr":
-		pkgsvr.Main()
 	case "netstack":
 		netstack.Main()
 	default:
diff --git a/src/security/ffx_test/BUILD.gn b/src/security/ffx_test/BUILD.gn
index d51ac5b..6f02ab12 100644
--- a/src/security/ffx_test/BUILD.gn
+++ b/src/security/ffx_test/BUILD.gn
@@ -7,12 +7,6 @@
 import("//build/config.gni")
 import("//build/testing/host_test_data.gni")
 
-# `assembled_system` requires a package with the name "pkgfs".
-fuchsia_package("empty_pkgfs") {
-  testonly = true
-  package_name = "pkgfs"
-}
-
 # `assembled_system` needs at least one package in base_packages.
 fuchsia_package("empty-pkg") {
   testonly = true
@@ -34,7 +28,6 @@
     ":empty-pkg",
     ":empty_config",
   ]
-  pkgfs_package_label = ":empty_pkgfs"
 }
 
 # `assembled_system` builds a zbi, specifying `ramdisk_fvm_in_zbi = true`
@@ -49,7 +42,6 @@
     ":empty-pkg",
     ":empty_config",
   ]
-  pkgfs_package_label = ":empty_pkgfs"
 }
 
 if (is_host) {
diff --git a/src/security/pkg_test/assemblies/assemble_security_pkg_test_system.gni b/src/security/pkg_test/assemblies/assemble_security_pkg_test_system.gni
index 2da3a91..3d233e6 100644
--- a/src/security/pkg_test/assemblies/assemble_security_pkg_test_system.gni
+++ b/src/security/pkg_test/assemblies/assemble_security_pkg_test_system.gni
@@ -105,7 +105,6 @@
     assembly_image_assembler = "${assembly_name}.image_assembler"
     assembly_partitions_config = "${assembly_name}_partitions_config"
     base_packages = "${assembly_name}.base_packages"
-    empty_pkgfs = "${assembly_name}_empty_pkgfs"
     build_info = "${assembly_name}_build_info"
     repository_config = "${assembly_name}_repository_config"
     packages_json = "${assembly_name}_packages_json"
@@ -184,13 +183,6 @@
     not_needed(invoker, [ "root_ssl_cert" ])
   }
 
-  # `assembled_system(...) { base_packages }` requires a package with the name
-  # "pkgfs".
-  fuchsia_package(labels.empty_pkgfs) {
-    testonly = true
-    package_name = "pkgfs"
-  }
-
   # Test-only root SSL certificates for domain names that may be used in tests.
   fuchsia_package(labels.root_ssl_certificates) {
     testonly = true
@@ -220,7 +212,7 @@
                             # use.
                             "//src/sys/pkg/bin/system-update-committer:enable_reboot_on_verification_failure",
                           ] + invoker.base_packages
-  all_base_packages = packages_for_assembly + [ ":${labels.empty_pkgfs}" ]
+  all_base_packages = packages_for_assembly
   packages_for_update = all_base_packages + [
                           ":${labels.update_package}",
                           ":${labels.meta_packages}",
@@ -233,7 +225,6 @@
     board_name = invoker.board_name
     bootfs_labels = []
     base_packages = packages_for_assembly
-    pkgfs_package_label = ":${labels.empty_pkgfs}"
   }
 
   # Process packages.json to use a custom domain name. This mirrors the domain
diff --git a/src/sys/pkg/BUILD.gn b/src/sys/pkg/BUILD.gn
index 1765ee4..2bd79a4 100644
--- a/src/sys/pkg/BUILD.gn
+++ b/src/sys/pkg/BUILD.gn
@@ -33,7 +33,6 @@
   public_deps = [
     "bin/pkg-cache",
     "bin/pkg-resolver",
-    "bin/pkgfs",
     "bin/system-update-committer",
     "bin/system-updater",
   ]
diff --git a/src/sys/pkg/bin/BUILD.gn b/src/sys/pkg/bin/BUILD.gn
index 007901a..933572c 100644
--- a/src/sys/pkg/bin/BUILD.gn
+++ b/src/sys/pkg/bin/BUILD.gn
@@ -23,7 +23,6 @@
     "pkg-cache:tests",
     "pkg-resolver:tests",
     "pkgctl:tests",
-    "pkgfs:tests",
     "pm:tests",
     "system-update-checker:tests",
     "system-update-committer:tests",
diff --git a/src/sys/pkg/bin/pkgfs/BUILD.gn b/src/sys/pkg/bin/pkgfs/BUILD.gn
deleted file mode 100644
index 530d9e3..0000000
--- a/src/sys/pkg/bin/pkgfs/BUILD.gn
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright 2017 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/components.gni")
-import("//build/go/go_library.gni")
-import("//build/go/go_test.gni")
-import("//build/go/toolchain.gni")
-import("//src/go/grand_unified_binary/gub.gni")
-
-go_library("pmd") {
-  name = "go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pkgfs/..."
-
-  deps = [
-    "//sdk/fidl/fuchsia.sys($go_toolchain)",
-    "//src/lib/component",
-    "//src/lib/thinfs:thinfs_lib",
-    "//src/sys/pkg/bin/pm:pm_lib",
-    "//src/sys/pkg/lib/far/go:far",
-    "//src/sys/pkg/lib/merkle",
-  ]
-
-  sources = [
-    "allowlist/allowlist.go",
-    "allowlist/allowlist_test.go",
-    "blobfs/blobfs.go",
-    "index/dynamic_index.go",
-    "index/dynamic_index_test.go",
-    "index/static_index.go",
-    "index/static_index_test.go",
-    "iou/iou.go",
-    "pkgfs/ctl_directory.go",
-    "pkgfs/dents.go",
-    "pkgfs/metafar.go",
-    "pkgfs/needs_directory.go",
-    "pkgfs/package_directory.go",
-    "pkgfs/package_install_directory.go",
-    "pkgfs/package_list_directories.go",
-    "pkgfs/pkgfs.go",
-    "pkgfs/pkgfs_test.go",
-    "pkgfs/root_directory.go",
-    "pkgfs/unsupported_vnodes.go",
-    "pkgfs/validation_directory.go",
-    "pkgfs/validation_directory_test.go",
-    "pkgfs/versions_directory.go",
-    "pkgsvr/pkgsvr.go",
-    "ramdisk/ramdisk.go",
-  ]
-}
-
-grand_unified_binary("pkgsvr_bin") {
-  output_name = "pkgsvr"
-}
-
-fuchsia_component("pkgsvr") {
-  deps = [ ":pkgsvr_bin" ]
-  manifest = "meta/pkgsvr.cmx"
-}
-
-fuchsia_package("pkgfs") {
-  deps = [ "//src/sys/pkg/bin/pkgfs:pkgsvr" ]
-}
-
-go_test_packages = [
-  "allowlist",
-  "index",
-  "pkgfs",
-]
-go_test_components = []
-
-foreach(testpkg, go_test_packages) {
-  test_target = "pmd_${testpkg}_test"
-  go_test(test_target) {
-    gopackages = [ "go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pkgfs/${testpkg}" ]
-    deps = [ ":pmd" ]
-    non_go_deps = [
-      "//sdk/lib/fdio",
-      "//src/lib/storage/ramdevice_client/cpp",
-    ]
-    include_dirs =
-        [ rebase_path("//src/lib/storage/ramdevice_client/cpp/include") ]
-  }
-
-  test_component_target = "${test_target}_component"
-  fuchsia_component(test_component_target) {
-    testonly = true
-    component_name = test_target
-    manifest = "meta/$test_target.cmx"
-    deps = [ ":$test_target" ]
-  }
-  go_test_components += [ ":$test_component_target" ]
-}
-
-fuchsia_test_package("pmd_tests") {
-  deps = [ "//src/storage/bin/blobfs" ]
-  test_components = go_test_components
-}
-
-group("tests") {
-  testonly = true
-  public_deps = [ ":pmd_tests" ]
-}
diff --git a/src/sys/pkg/bin/pkgfs/allowlist/allowlist.go b/src/sys/pkg/bin/pkgfs/allowlist/allowlist.go
deleted file mode 100644
index 219e773..0000000
--- a/src/sys/pkg/bin/pkgfs/allowlist/allowlist.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package allowlist
-
-import (
-	"bufio"
-	"io"
-	"log"
-	"strings"
-)
-
-// Allowlist is a generic allowlist for strings. Currently used to allow/disallow
-// non-static packages from the /packages subdir
-type Allowlist struct {
-	// Note to future maintainers:
-	// If you're going to do updates to this at runtime
-	// (i.e. after pkgfs startup when there's only one goroutine),
-	// you MUST mediate access to this map with a synchronization mechanism.
-	allowed map[string]struct{}
-}
-
-// LoadFrom takes a file in the form of an io.Reader and returns a built AllowList
-func LoadFrom(f io.Reader) (*Allowlist, error) {
-	allowed := map[string]struct{}{}
-
-	reader := bufio.NewReader(f)
-	for {
-		l, err := reader.ReadString('\n')
-		l = strings.TrimSpace(l)
-		if err != nil {
-			if err == io.EOF {
-				if l == "" {
-					// We're done
-					break
-				} else {
-					// Keep going for one more record
-				}
-			} else {
-				log.Printf("pkgfs: couldn't parse allowlist file: %v", err)
-				return nil, err
-			}
-		}
-		if strings.HasPrefix(l, "#") || l == "" {
-			// This is a comment line in the allowlist
-			continue
-		}
-		allowed[l] = struct{}{}
-	}
-
-	return &Allowlist{allowed: allowed}, nil
-}
-
-// Contains returns whether an Allowlist contains a given string
-func (a *Allowlist) Contains(entry string) bool {
-	_, found := a.allowed[entry]
-	return found
-}
diff --git a/src/sys/pkg/bin/pkgfs/allowlist/allowlist_test.go b/src/sys/pkg/bin/pkgfs/allowlist/allowlist_test.go
deleted file mode 100644
index e323378..0000000
--- a/src/sys/pkg/bin/pkgfs/allowlist/allowlist_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package allowlist
-
-import (
-	"strings"
-	"testing"
-)
-
-type allowListCheckAndResult struct {
-	item          string
-	shouldBeFound bool
-}
-
-var allowlistTests = []struct {
-	allowlistText             string
-	allowlistChecksAndResults []allowListCheckAndResult
-	desiredAllowListLength    int
-}{
-	{
-		// Empty allowlist
-		"#Comment",
-		[]allowListCheckAndResult{
-			{"test", false},
-			{"#Comment", false},
-		},
-		0,
-	},
-	{
-		// Ignores empty lines
-		"foo\n\nbar\n",
-		[]allowListCheckAndResult{
-			{"foo", true},
-			{"bar", true},
-			{"", false},
-		},
-		2,
-	},
-	{
-		// Empty allowlist with a newline at EOF
-		"#Comment\n",
-		[]allowListCheckAndResult{
-			{"test", false},
-			{"#Comment", false},
-		},
-		0,
-	},
-	{
-		// Allowlist with a comment
-		"#Test\nls\ncurl\n",
-		[]allowListCheckAndResult{
-			{"ls", true},
-			{"curl", true},
-			{"iquery", false},
-		},
-		2,
-	},
-	{
-		// Allowlist without a newline at EOF
-		"#Test\nls\ncurl",
-		[]allowListCheckAndResult{
-			{"ls", true},
-			{"curl", true},
-			{"iquery", false},
-		},
-		2,
-	},
-}
-
-func TestAllowlist(t *testing.T) {
-	for _, test := range allowlistTests {
-		t.Run(test.allowlistText, func(t *testing.T) {
-			reader := strings.NewReader(test.allowlistText)
-			allowlist, err := LoadFrom(reader)
-			if err != nil {
-				t.Fatal(err)
-			}
-			for _, check := range test.allowlistChecksAndResults {
-				found := allowlist.Contains(check.item)
-				if found != check.shouldBeFound {
-					t.Errorf("Expected item %s to be found in allowlist: %t. Got: %t", check.item, check.shouldBeFound, found)
-				}
-			}
-
-			if len(allowlist.allowed) != test.desiredAllowListLength {
-				t.Errorf("Expected allowlist to have %d entries, found %d", test.desiredAllowListLength, len(allowlist.allowed))
-			}
-		})
-	}
-}
diff --git a/src/sys/pkg/bin/pkgfs/blobfs/blobfs.go b/src/sys/pkg/bin/pkgfs/blobfs/blobfs.go
deleted file mode 100644
index 1ea0ff5..0000000
--- a/src/sys/pkg/bin/pkgfs/blobfs/blobfs.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//go:build !build_with_native_toolchain
-// +build !build_with_native_toolchain
-
-// Package blobfs provides some wrappers around interactions with the blobfs.
-// TODO(raggi): add support for blob garbage collection
-package blobfs
-
-import (
-	"log"
-	"os"
-	"syscall"
-	"syscall/zx"
-	"syscall/zx/fdio"
-
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pkgfs/iou"
-
-	"fidl/fuchsia/io"
-)
-
-// Manager wraps operations for reading and writing to blobfs, and will later
-// tackle more complex problems such as managing reference counting and garbage
-// collection of blobs.
-type Manager struct {
-	dir *fdio.Directory
-}
-
-// New constructs a new Manager for the blobfs mount at the given root.
-func New(blobDir *fdio.Directory) (*Manager, error) {
-	return &Manager{blobDir}, nil
-}
-
-// Open opens a blobfs blob for reading
-func (m *Manager) Open(root string) (*os.File, error) {
-	return m.OpenFile(root, os.O_RDONLY, 0777)
-}
-
-// OpenFile opens a blobfs path with the given flags
-func (m *Manager) OpenFile(root string, flags int, mode uint32) (*os.File, error) {
-	return iou.OpenFrom(m.dir, root, flags, mode)
-}
-
-// Sync flushes cached packages.
-func (m *Manager) Sync() error {
-	return m.dir.Sync()
-}
-
-// Channel returns an the FDIO directory handle for the blobfs root
-func (m *Manager) Channel() zx.Channel {
-	return zx.Channel(m.dir.Handles()[0])
-}
-
-// HasBlob returns true if the requested blob is available for reading, false otherwise
-func (m *Manager) HasBlob(root string) bool {
-	f, err := m.dir.Open(root, uint32(io.OpenFlagsRightReadable|io.OpenFlagsNotDirectory), io.ModeTypeFile)
-	if err != nil {
-		// if the blob can't be opened for read at all, it doesn't
-		// exist and isn't in the process of being written.
-		return false
-	}
-	defer f.Close()
-
-	if file, ok := f.(*fdio.File); ok {
-		switch status := zx.Sys_object_wait_one(zx.Handle(file.Event), zx.Signals(io.FileSignalReadable), 0, nil); status {
-		case zx.ErrOk:
-			// Blobfs will allow blobs that are in the process of being written to
-			// be opened for read, and it will set zx.SignalUser0 on the blob's
-			// event when it actually becomes readable. For the purposes of pkgfs,
-			// we only have a blob if it exists and is readable.
-			return true
-		case zx.ErrTimedOut:
-		default:
-			log.Printf("blobfs: unknown error asserting blob existence: %s", err)
-		}
-	}
-	return false
-}
-
-func (m *Manager) Blobs() ([]string, error) {
-	d, err := m.OpenFile(".", syscall.O_DIRECTORY, 0777)
-	if err != nil {
-		return nil, err
-	}
-	defer d.Close()
-	return d.Readdirnames(-1)
-}
diff --git a/src/sys/pkg/bin/pkgfs/index/dynamic_index.go b/src/sys/pkg/bin/pkgfs/index/dynamic_index.go
deleted file mode 100644
index 1e7dd4f..0000000
--- a/src/sys/pkg/bin/pkgfs/index/dynamic_index.go
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Package index implements a basic index of packages and their relative
-// installation states, as well as thier various top level metadata properties.
-package index
-
-import (
-	"log"
-	"os"
-	"sync"
-
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pm/pkg"
-)
-
-// DynamicIndex provides concurrency safe access to a dynamic index of packages and package metadata
-type DynamicIndex struct {
-	static *StaticIndex
-
-	// mu protects all following fields
-	mu sync.Mutex
-
-	// roots is a map of merkleroot -> package name/version for active packages
-	roots map[string]pkg.Package
-
-	// index is a map of package name/version -> most recently activated merkleroot
-	index map[pkg.Package]string
-
-	// indexOrdered contains the list of keys in index in initial insertion order
-	//
-	// Used to make the List() order deterministic.
-	indexOrdered []pkg.Package
-
-	// installing is a map of merkleroot -> package name/version
-	installing map[string]pkg.Package
-
-	// needs is a map of blob merkleroot -> set[package merkleroot] for packages that need blobs
-	needs map[string]map[string]struct{}
-
-	// waiting is a map of package merkleroot -> set[blob merkleroots]
-	waiting map[string]map[string]struct{}
-}
-
-// NewDynamic initializes a DynamicIndex
-func NewDynamic(static *StaticIndex) *DynamicIndex {
-	return &DynamicIndex{
-		static:       static,
-		roots:        make(map[string]pkg.Package),
-		index:        make(map[pkg.Package]string),
-		indexOrdered: nil,
-		installing:   make(map[string]pkg.Package),
-		needs:        make(map[string]map[string]struct{}),
-		waiting:      make(map[string]map[string]struct{}),
-	}
-}
-
-// Get looks up a package in the dynamic index, returning it if found.
-func (idx *DynamicIndex) Get(p pkg.Package) (result string, found bool) {
-	idx.mu.Lock()
-	defer idx.mu.Unlock()
-
-	result, found = idx.index[p]
-	return
-}
-
-// List lists every package in the dynamic index in insertion order.
-func (idx *DynamicIndex) List() []pkg.Package {
-	idx.mu.Lock()
-	defer idx.mu.Unlock()
-
-	pkgs := make([]pkg.Package, len(idx.index))
-	copy(pkgs, idx.indexOrdered)
-	return pkgs
-}
-
-// Add adds a package to the index
-func (idx *DynamicIndex) Add(p pkg.Package, root string) error {
-	idx.mu.Lock()
-	defer idx.mu.Unlock()
-	return idx.addLocked(p, root)
-}
-
-func (idx *DynamicIndex) addLocked(p pkg.Package, root string) error {
-	// After being added, a package is not in an installing state.
-	delete(idx.installing, root)
-
-	// Defensive: while we must assume caller-correctness, so these should never
-	// match, if we ever are called, the only safe thing to do is to cleanup state
-	// as best we can, lest external API is littered with leaks from our internal
-	// tracking, which has non-local side effects.
-	delete(idx.waiting, root)
-	for _, needs := range idx.needs {
-		delete(needs, root)
-	}
-
-	if _, found := idx.static.GetRoot(root); found {
-		return os.ErrExist
-	}
-
-	if _, found := idx.static.Get(p); found {
-		// TODO(fxbug.dev/21991): this needs to be removed as the static package set should not
-		// be updated dynamically in future.
-		err := idx.static.Set(p, root)
-
-		return err
-	}
-
-	if oldRoot, ok := idx.index[p]; ok {
-		delete(idx.roots, oldRoot)
-	} else {
-		idx.indexOrdered = append(idx.indexOrdered, p)
-	}
-	idx.index[p] = root
-	idx.roots[root] = p
-	return nil
-}
-
-// Installing marks the given package as being in the process of installing. The
-// package identity is not yet known, and can be updated later using
-// UpdateInstalling.
-func (idx *DynamicIndex) Installing(root string) {
-	idx.mu.Lock()
-	defer idx.mu.Unlock()
-
-	idx.installing[root] = pkg.Package{}
-}
-
-// UpdateInstalling updates the installing index for the given package with an
-// identity once known (that is, once the package meta.far has been able to be
-// opened, so the packages identity is known).
-func (idx *DynamicIndex) UpdateInstalling(root string, p pkg.Package) {
-	idx.mu.Lock()
-	defer idx.mu.Unlock()
-
-	idx.installing[root] = p
-}
-
-// InstallingFailedForPackage removes an entry from the package installation index,
-// this is called when the package meta.far blob is not readable, or the package is
-// not valid.
-func (idx *DynamicIndex) InstallingFailedForPackage(pkgRoot string) {
-	idx.mu.Lock()
-	defer idx.mu.Unlock()
-
-	p := idx.installing[pkgRoot]
-	log.Printf("package failed %s/%s (%s)", p.Name, p.Version, pkgRoot)
-	delete(idx.installing, pkgRoot)
-}
-
-// AddNeeds updates the index about the blobs required in order to activate an
-// installing package. It is possible for the addition of needs to race
-// fulfillment that is happening in other concurrent processes. When that
-// occurs, this method will return os.ErrExist.
-func (idx *DynamicIndex) AddNeeds(root string, needs map[string]struct{}) error {
-	idx.mu.Lock()
-	defer idx.mu.Unlock()
-
-	if _, found := idx.installing[root]; !found {
-		return os.ErrExist
-	}
-
-	for blob := range needs {
-		if _, found := idx.needs[blob]; found {
-			idx.needs[blob][root] = struct{}{}
-		} else {
-			idx.needs[blob] = map[string]struct{}{root: {}}
-		}
-	}
-	// We wait on all of the "needs", that is, all blobs that were not found on the
-	// system at the time of import.
-	idx.waiting[root] = needs
-	return nil
-}
-
-// Fulfill processes the signal that a blob need has been fulfilled. meta.far's
-// are also published through this path, but a meta.far fulfillment does not
-// mean that the package is activated, only that its blob has been written. When
-// a packages 'waiting' set has been emptied, fulfill will call Add, which is
-// the point of activation.
-func (idx *DynamicIndex) Fulfill(need string) {
-	idx.mu.Lock()
-	defer idx.mu.Unlock()
-
-	packageRoots := idx.needs[need]
-	delete(idx.needs, need)
-
-	for pkgRoot := range packageRoots {
-		waiting := idx.waiting[pkgRoot]
-		delete(waiting, need)
-		if len(waiting) == 0 {
-			delete(idx.waiting, pkgRoot)
-			p := idx.installing[pkgRoot]
-			if err := idx.addLocked(p, pkgRoot); err != nil {
-				if os.IsExist(err) {
-					log.Printf("package already exists at fulfillment: %s", err)
-				} else {
-					log.Printf("unexpected error adding package after fulfillment: %s", err)
-				}
-			} else {
-				log.Printf("cached %s/%s (%s)", p.Name, p.Version, pkgRoot)
-			}
-		}
-	}
-}
-
-func (idx *DynamicIndex) PkgHasNeed(pkg, root string) bool {
-	idx.mu.Lock()
-	defer idx.mu.Unlock()
-
-	// TODO(computerdruid): replace this with logic that uses idx.waiting and delete idx.needs
-	needs, found := idx.needs[pkg]
-	if !found {
-		return found
-	}
-	for need := range needs {
-		if need == root {
-			return true
-		}
-	}
-	return false
-}
-
-func (idx *DynamicIndex) PkgNeedsList(pkgRoot string) []string {
-	idx.mu.Lock()
-	defer idx.mu.Unlock()
-
-	pkgNeeds, found := idx.waiting[pkgRoot]
-	if !found {
-		return []string{}
-	}
-	blobs := make([]string, 0, len(pkgNeeds))
-	for blob := range pkgNeeds {
-		blobs = append(blobs, blob)
-	}
-	return blobs
-}
-
-func (idx *DynamicIndex) InstallingList() []string {
-	idx.mu.Lock()
-	defer idx.mu.Unlock()
-
-	names := make([]string, 0, len(idx.installing))
-	for name := range idx.installing {
-		names = append(names, name)
-	}
-	return names
-}
-
-func (idx *DynamicIndex) IsInstalling(merkle string) bool {
-	idx.mu.Lock()
-	defer idx.mu.Unlock()
-
-	_, found := idx.installing[merkle]
-	return found
-}
-
-// GetRoot looks for a package by merkleroot, returning the matching package and
-// true, if found, an empty package and false otherwise.
-func (idx *DynamicIndex) GetRoot(root string) (pkg.Package, bool) {
-	p, found := idx.static.GetRoot(root)
-	if found {
-		return p, found
-	}
-
-	idx.mu.Lock()
-	defer idx.mu.Unlock()
-	p, found = idx.roots[root]
-	return p, found
-}
-
-// PackageBlobs returns the list of blobs which are meta FARs backing packages
-// in the dynamic and static indices.
-func (idx *DynamicIndex) PackageBlobs() []string {
-	packageBlobs := idx.static.PackageBlobs()
-	idx.mu.Lock()
-	dynamicBlobs := make([]string, 0, len(idx.roots))
-	for merkle := range idx.roots {
-		dynamicBlobs = append(dynamicBlobs, string(merkle))
-	}
-	idx.mu.Unlock()
-
-	return append(packageBlobs, dynamicBlobs...)
-}
-
-// AllPackageBlobs aggregates all installing, dynamic and static index package
-// meta.far blobs into a single list. Any errors encountered along the way are
-// logged, but otherwise the best available list is generated under a single
-// lock, to provide a relatively consistent view of objects that must be
-// maintained. This function is intended for use by the GC and the versions
-// directory. The list will not contain duplicates.
-func (idx *DynamicIndex) AllPackageBlobs() []string {
-	allPackageBlobs := make(map[string]struct{})
-	idx.mu.Lock()
-	for blob := range idx.installing {
-		allPackageBlobs[blob] = struct{}{}
-	}
-	idx.mu.Unlock()
-
-	for _, blob := range idx.PackageBlobs() {
-		allPackageBlobs[blob] = struct{}{}
-	}
-
-	blobList := make([]string, 0, len(allPackageBlobs))
-	for blob := range allPackageBlobs {
-		blobList = append(blobList, blob)
-	}
-
-	return blobList
-}
diff --git a/src/sys/pkg/bin/pkgfs/index/dynamic_index_test.go b/src/sys/pkg/bin/pkgfs/index/dynamic_index_test.go
deleted file mode 100644
index 22a0d27b..0000000
--- a/src/sys/pkg/bin/pkgfs/index/dynamic_index_test.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package index
-
-import (
-	"reflect"
-	"testing"
-
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pm/pkg"
-)
-
-func TestAdd(t *testing.T) {
-	idx := NewDynamic(NewStatic())
-
-	err := idx.Add(pkg.Package{Name: "foo", Version: "0"}, "abc")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = idx.Add(pkg.Package{Name: "foo", Version: "1"}, "def")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = idx.Add(pkg.Package{Name: "bar", Version: "10"}, "123")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	pkgs := idx.List()
-	wantPkgs := []pkg.Package{{Name: "foo", Version: "0"}, {Name: "foo", Version: "1"}, {Name: "bar", Version: "10"}}
-	if !reflect.DeepEqual(pkgs, wantPkgs) {
-		t.Errorf("got %q, want %q", pkgs, wantPkgs)
-	}
-}
-
-func TestList(t *testing.T) {
-	idx := NewDynamic(NewStatic())
-
-	pkgs := []pkg.Package{
-		{"foo", "0"},
-		{"bar", "1"},
-	}
-	roots := []string{"abc", "def"}
-	for i, pkg := range pkgs {
-		if err := idx.Add(pkg, roots[i]); err != nil {
-			t.Fatal(err)
-		}
-	}
-
-	list := idx.List()
-	if got, want := len(list), len(pkgs); got != want {
-		t.Errorf("got %d, want %d", got, want)
-	}
-	for i, pkg := range pkgs {
-		if !reflect.DeepEqual(list[i], pkg) {
-			t.Errorf("mismatched package at %d: %v, %v", i, list[i], pkg)
-		}
-	}
-}
-
-func TestFulfill(t *testing.T) {
-	idx := NewDynamic(NewStatic())
-
-	// New package, no blobs pre-installed.
-	{
-		// Start installing package.
-		neededBlobs := map[string]struct{}{
-			"blob1": {},
-			"blob2": {},
-		}
-		idx.Installing("root1")
-		idx.UpdateInstalling("root1", pkg.Package{Name: "foo", Version: "1"})
-		idx.AddNeeds("root1", neededBlobs)
-
-		wantWaiting := map[string]struct{}{
-			"blob1": {},
-			"blob2": {},
-		}
-		if !reflect.DeepEqual(idx.waiting["root1"], wantWaiting) {
-			t.Errorf("got %q, want %q", idx.waiting["root1"], wantWaiting)
-		}
-		wantNeeds := map[string]map[string]struct{}{
-			"blob1": {"root1": {}},
-			"blob2": {"root1": {}},
-		}
-		if !reflect.DeepEqual(idx.needs, wantNeeds) {
-			t.Errorf("got %q, want %q", idx.needs, wantNeeds)
-		}
-		wantInstalling := pkg.Package{Name: "foo", Version: "1"}
-		gotInstalling := idx.installing["root1"]
-		if !reflect.DeepEqual(gotInstalling, wantInstalling) {
-			t.Errorf("got %v, want %v", gotInstalling, wantInstalling)
-		}
-
-		// Fulfill one blob. Package is not done yet.
-		idx.Fulfill("blob1")
-
-		wantWaiting = make(map[string]struct{})
-		wantWaiting["blob2"] = struct{}{}
-		if !reflect.DeepEqual(idx.waiting["root1"], wantWaiting) {
-			t.Errorf("got %q, want %q", idx.waiting["root1"], wantWaiting)
-		}
-		wantNeeds = map[string]map[string]struct{}{
-			"blob2": {"root1": {}},
-		}
-		if !reflect.DeepEqual(idx.needs, wantNeeds) {
-			t.Errorf("got %q, want %q", idx.needs, wantNeeds)
-		}
-
-		// Fulfill other blob. Package is done and appears in index.
-		idx.Fulfill("blob2")
-		if _, ok := idx.waiting["root1"]; ok {
-			t.Errorf("root1 was not deleted from waiting")
-		}
-		wantNeeds = map[string]map[string]struct{}{}
-		if !reflect.DeepEqual(idx.needs, wantNeeds) {
-			t.Errorf("got %q, want %q", idx.needs, wantNeeds)
-		}
-		if _, ok := idx.installing["root1"]; ok {
-			t.Errorf("root1 was not deleted from installing")
-		}
-
-		pkgs := idx.List()
-		wantPkgs := []pkg.Package{{Name: "foo", Version: "1"}}
-		if !reflect.DeepEqual(pkgs, wantPkgs) {
-			t.Errorf("got %q, want %q", pkgs, wantPkgs)
-		}
-	}
-
-	// Second package only needs one blob.
-	{
-		// Start installing package.
-		neededBlobs := map[string]struct{}{
-			"blob4": {},
-		}
-		idx.Installing("root2")
-		idx.UpdateInstalling("root2", pkg.Package{Name: "bar", Version: "2"})
-		idx.AddNeeds("root2", neededBlobs)
-
-		wantWaiting := map[string]struct{}{
-			"blob4": {},
-		}
-		if !reflect.DeepEqual(idx.waiting["root2"], wantWaiting) {
-			t.Errorf("got %q, want %q", idx.waiting, wantWaiting)
-		}
-		wantNeeds := map[string]map[string]struct{}{
-			"blob4": {"root2": {}},
-		}
-		if !reflect.DeepEqual(idx.needs, wantNeeds) {
-			t.Errorf("got %q, want %q", idx.needs, wantNeeds)
-		}
-		wantInstalling := pkg.Package{Name: "bar", Version: "2"}
-		gotInstalling := idx.installing["root2"]
-		if !reflect.DeepEqual(gotInstalling, wantInstalling) {
-			t.Errorf("got %v, want %v", gotInstalling, wantInstalling)
-		}
-
-		// Fulfill blob. Now the package should be marked finished.
-		idx.Fulfill("blob4")
-
-		if _, ok := idx.waiting["root2"]; ok {
-			t.Errorf("root2 was not deleted from waiting")
-		}
-		wantNeeds = map[string]map[string]struct{}{}
-		if !reflect.DeepEqual(idx.needs, wantNeeds) {
-			t.Errorf("got %q, want %q", idx.needs, wantNeeds)
-		}
-		if _, ok := idx.installing["root2"]; ok {
-			t.Errorf("root2 was not deleted from installing")
-		}
-	}
-
-	// This is getting out of hand; now there are two of them
-	pkgs := idx.List()
-	wantPkgs := []pkg.Package{{Name: "foo", Version: "1"}, {Name: "bar", Version: "2"}}
-	if !reflect.DeepEqual(pkgs, wantPkgs) {
-		t.Errorf("got %q, want %q", pkgs, wantPkgs)
-	}
-}
diff --git a/src/sys/pkg/bin/pkgfs/index/static_index.go b/src/sys/pkg/bin/pkgfs/index/static_index.go
deleted file mode 100644
index 7f2a575..0000000
--- a/src/sys/pkg/bin/pkgfs/index/static_index.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package index
-
-import (
-	"bufio"
-	"io"
-	"log"
-	"os"
-	"sort"
-	"strings"
-	"sync"
-
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pm/pkg"
-)
-
-// StaticIndex is an index of packages that can not change. It is intended for
-// use during early / verified boot stages to present a unified set of packages
-// from a pre-computed and verifiable index file.
-type StaticIndex struct {
-	mu      sync.RWMutex
-	roots   map[pkg.Package]string
-	updates map[pkg.Package]string
-}
-
-// NewStatic initializes an empty StaticIndex
-func NewStatic() *StaticIndex {
-	return &StaticIndex{roots: map[pkg.Package]string{}}
-}
-
-type indexFileEntry struct {
-	Key    pkg.Package
-	Merkle string
-}
-
-// ParseIndexFile parses the key=value format in static_packages.
-func ParseIndexFile(f io.Reader) (result []indexFileEntry, _ error) {
-	r := bufio.NewReader(f)
-	for {
-		l, err := r.ReadString('\n')
-		l = strings.TrimSpace(l)
-		if err != nil {
-			if err == io.EOF {
-				if l == "" {
-					// We're done
-					break
-				} else {
-					// Keep going for one more record
-				}
-			} else {
-				return nil, err
-			}
-		}
-		parts := strings.SplitN(l, "=", 2)
-
-		if len(parts) == 2 {
-			nameVersion := parts[0]
-			merkle := parts[1]
-
-			if len(merkle) != 64 {
-				log.Printf("index: invalid merkleroot in static manifest: %q", l)
-				continue
-			}
-
-			parts = strings.SplitN(nameVersion, "/", 2)
-			if len(parts) != 2 {
-				log.Printf("index: invalid name/version pair in static manifest: %q", nameVersion)
-				continue
-			}
-			name := parts[0]
-			version := parts[1]
-
-			result = append(result, indexFileEntry{Key: pkg.Package{Name: name, Version: version}, Merkle: merkle})
-		} else {
-			if len(l) > 0 {
-				log.Printf("index: invalid line in static manifest: %q", l)
-			}
-		}
-	}
-	return
-}
-
-// LoadFrom reads a static index from `path` and replaces the index in the
-// receiver with the contents.
-func (idx *StaticIndex) LoadFrom(f io.Reader, systemImage pkg.Package, systemImageMerkleRoot string) error {
-	roots := map[pkg.Package]string{}
-
-	entries, err := ParseIndexFile(f)
-	if err != nil {
-		return err
-	}
-	for _, entry := range entries {
-		roots[entry.Key] = entry.Merkle
-	}
-
-	roots[systemImage] = systemImageMerkleRoot
-
-	idx.mu.Lock()
-	idx.roots = roots
-	idx.updates = make(map[pkg.Package]string)
-	idx.mu.Unlock()
-
-	return nil
-}
-
-// HasStaticName looks for a package with the given `name` in the static static
-// index, ignoring any runtime updates made to the static index.
-func (idx *StaticIndex) HasStaticName(name string) bool {
-	idx.mu.RLock()
-	defer idx.mu.RUnlock()
-
-	for k := range idx.roots {
-		if k.Name == name {
-			return true
-		}
-	}
-	return false
-}
-
-// HasName looks for a package with the given `name`
-func (idx *StaticIndex) HasName(name string) bool {
-	idx.mu.RLock()
-	defer idx.mu.RUnlock()
-
-	for k := range idx.updates {
-		if k.Name == name {
-			return true
-		}
-	}
-
-	for k := range idx.roots {
-		if k.Name == name {
-			return true
-		}
-	}
-	return false
-}
-
-// ListVersions returns the list of version strings given a package name
-func (idx *StaticIndex) ListVersions(name string) []string {
-	idx.mu.RLock()
-	defer idx.mu.RUnlock()
-
-	versions := map[string]struct{}{}
-
-	for k := range idx.updates {
-		if k.Name == name {
-			versions[k.Version] = struct{}{}
-		}
-	}
-	for k := range idx.roots {
-		if k.Name == name {
-			versions[k.Version] = struct{}{}
-		}
-	}
-
-	verList := make([]string, 0, len(versions))
-	for v := range versions {
-		verList = append(verList, v)
-	}
-
-	return verList
-}
-
-// Get looks up the given package, returning (merkleroot, true) if found, or ("", false) otherwise.
-func (idx *StaticIndex) Get(p pkg.Package) (string, bool) {
-	idx.mu.RLock()
-	defer idx.mu.RUnlock()
-	s, ok := idx.roots[p]
-	return s, ok
-}
-
-// GetRoot looks for a package by merkleroot, returning the matching package and
-// true, if found, an empty package and false otherwise.
-func (idx *StaticIndex) GetRoot(root string) (pkg.Package, bool) {
-	idx.mu.RLock()
-	defer idx.mu.RUnlock()
-
-	for p, rt := range idx.updates {
-		if root == rt {
-			return p, true
-		}
-	}
-
-	for p, rt := range idx.roots {
-		if root == rt {
-			return p, true
-		}
-	}
-	return pkg.Package{}, false
-}
-
-// HasStaticRoot looks for a package by merkleroot in the static static index,
-// ignoring any runtime updates made to the static index.
-func (idx *StaticIndex) HasStaticRoot(root string) bool {
-	idx.mu.RLock()
-	defer idx.mu.RUnlock()
-
-	for _, rt := range idx.roots {
-		if root == rt {
-			return true
-		}
-	}
-	return false
-}
-
-// Set sets the given package to the given root. TODO(fxbug.dev/21988) This method should
-// be removed in future, the static index should only be updated as a whole unit
-// via Load.
-func (idx *StaticIndex) Set(p pkg.Package, root string) error {
-	idx.mu.Lock()
-	defer idx.mu.Unlock()
-
-	if idx.roots[p] == root || idx.updates[p] == root {
-		return os.ErrExist
-	}
-
-	idx.updates[p] = root
-	return nil
-}
-
-// List returns the list of packages in byte-lexical order
-func (idx *StaticIndex) List() ([]pkg.Package, error) {
-	idx.mu.RLock()
-	defer idx.mu.RUnlock()
-
-	var pkgs = make(map[pkg.Package]struct{})
-
-	for k := range idx.roots {
-		pkgs[k] = struct{}{}
-	}
-
-	packages := make([]pkg.Package, 0, len(pkgs))
-	for k := range pkgs {
-		packages = append(packages, k)
-	}
-	sort.Sort(pkg.ByNameVersion(packages))
-	return packages, nil
-}
-
-// StaticPacakgeBlobs returns the blobs that are the meta FARs for the packages
-// in the static index and never changes, unlike PackageBlobs() which will also
-// include updated versions of packages in the index.
-func (idx *StaticIndex) StaticPackageBlobs() []string {
-	b := make([]string, 0, len(idx.roots))
-	for _, m := range idx.roots {
-		b = append(b, m)
-	}
-	return b
-}
-
-// PackageBlobs returns the list of blobs which are meta FARs backing packages in the index.
-func (idx *StaticIndex) PackageBlobs() []string {
-
-	var blbs = make(map[string]struct{})
-
-	for _, m := range idx.updates {
-		blbs[m] = struct{}{}
-	}
-	for _, m := range idx.roots {
-		blbs[m] = struct{}{}
-	}
-
-	blobs := make([]string, 0, len(blbs))
-	for m := range blbs {
-		blobs = append(blobs, m)
-	}
-	return blobs
-}
diff --git a/src/sys/pkg/bin/pkgfs/index/static_index_test.go b/src/sys/pkg/bin/pkgfs/index/static_index_test.go
deleted file mode 100644
index 5d37bb3..0000000
--- a/src/sys/pkg/bin/pkgfs/index/static_index_test.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package index
-
-import (
-	"reflect"
-	"sort"
-	"strings"
-	"testing"
-
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pm/pkg"
-)
-
-const lotsOf2s = "2222222222222222222222222222222222222222222222222222222222222222"
-
-func setUpStaticIndex(t *testing.T) (*StaticIndex, pkg.Package) {
-	f := strings.NewReader("a/0=331e2e4b22e61fba85c595529103f957d7fe19731a278853361975d639a1bdd8\n")
-	si := NewStatic()
-	systemImage := pkg.Package{
-		Name:    "system_image",
-		Version: "0",
-	}
-	if err := si.LoadFrom(f, systemImage, lotsOf2s); err != nil {
-		t.Fatal(err)
-	}
-	return si, systemImage
-}
-
-func TestStatic(t *testing.T) {
-	si, systemImage := setUpStaticIndex(t)
-
-	expectList := []pkg.Package{{Name: "a", Version: "0"}, systemImage}
-	gotList, err := si.List()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if !reflect.DeepEqual(gotList, expectList) {
-		t.Errorf("static.List() %v != %v", gotList, expectList)
-	}
-
-	if !si.HasName("a") {
-		t.Error("static.HasName(`a`) = false, want true")
-	}
-
-	if si.HasName("b") {
-		t.Error("static.HasName(`b`) = true, want false")
-	}
-
-	getPackageCases := []struct {
-		name, version string
-		result        string
-	}{
-		{"a", "0", "331e2e4b22e61fba85c595529103f957d7fe19731a278853361975d639a1bdd8"},
-		{"a", "1", ""},
-		{"b", "0", ""},
-	}
-	for _, tc := range getPackageCases {
-		if got, _ := si.Get(pkg.Package{Name: tc.name, Version: tc.version}); got != tc.result {
-			t.Errorf("static.Get(%q, %q) = %q want %q", tc.name, tc.version, got, tc.result)
-		}
-	}
-
-	if got, want := si.ListVersions("a"), []string{"0"}; !reflect.DeepEqual(got, want) {
-		t.Errorf("static.ListVersions(`a`) = %v, want %v", got, want)
-	}
-}
-
-func TestStaticUpdatesNotShownInList(t *testing.T) {
-	si, systemImage := setUpStaticIndex(t)
-
-	si.Set(pkg.Package{Name: "a", Version: "1"}, "1111111111111111111111111111111111111111111111111111111111111111")
-	expectList := []pkg.Package{{Name: "a", Version: "0"}, systemImage}
-	gotList, err := si.List()
-
-	if err != nil {
-		t.Fatal(err)
-	}
-	if !reflect.DeepEqual(gotList, expectList) {
-		t.Errorf("static.List() %v != %v", gotList, expectList)
-	}
-}
-
-func TestStaticUpdatesNotShownInGet(t *testing.T) {
-	si, _ := setUpStaticIndex(t)
-
-	si.Set(pkg.Package{Name: "a", Version: "1"}, "1111111111111111111111111111111111111111111111111111111111111111")
-
-	getPackageCases := []struct {
-		name, version string
-		result        string
-	}{
-		{"a", "0", "331e2e4b22e61fba85c595529103f957d7fe19731a278853361975d639a1bdd8"},
-		{"a", "1", ""},
-		{"b", "0", ""},
-	}
-	for _, tc := range getPackageCases {
-		if got, _ := si.Get(pkg.Package{Name: tc.name, Version: tc.version}); got != tc.result {
-			t.Errorf("static.Get(%q, %q) = %q want %q", tc.name, tc.version, got, tc.result)
-		}
-	}
-}
-
-func TestStaticUpdatesShownInListVersions(t *testing.T) {
-	si, _ := setUpStaticIndex(t)
-
-	si.Set(pkg.Package{Name: "a", Version: "1"}, "1111111111111111111111111111111111111111111111111111111111111111")
-	got, want := si.ListVersions("a"), []string{"0", "1"}
-	sort.Strings(got)
-
-	if !reflect.DeepEqual(got, want) {
-		t.Errorf("static.ListVersions(`a`) = %v, want %v", got, want)
-	}
-}
-
-func TestHasName(t *testing.T) {
-	f := strings.NewReader(
-		"static/0=0000000000000000000000000000000000000000000000000000000000000000\n" +
-			"update/0=0000000000000000000000000000000000000000000000000000000000000001\n")
-	si := NewStatic()
-	systemImage := pkg.Package{
-		Name:    "system_image",
-		Version: "0",
-	}
-	if err := si.LoadFrom(f, systemImage, lotsOf2s); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := si.Set(pkg.Package{Name: "update", Version: "0"}, "0000000000000000000000000000000000000000000000000000000000000002"); err != nil {
-		t.Fatal(err)
-	}
-	if err := si.Set(pkg.Package{Name: "new", Version: "0"}, "0000000000000000000000000000000000000000000000000000000000000003"); err != nil {
-		t.Fatal(err)
-	}
-
-	hasNameCases := []struct {
-		name         string
-		static, both bool
-	}{
-		{"static", true, true},
-		{"update", true, true},
-		{"new", false, true},
-		{"unknown", false, false},
-	}
-	for _, tc := range hasNameCases {
-		if got := si.HasStaticName(tc.name); got != tc.static {
-			t.Errorf("static.HasStaticName(%q) = %v want %v", tc.name, got, tc.static)
-		}
-
-		if got := si.HasName(tc.name); got != tc.both {
-			t.Errorf("static.HasName(%q) = %v want %v", tc.name, got, tc.both)
-		}
-	}
-}
-
-func TestHasStaticRoot(t *testing.T) {
-	f := strings.NewReader(
-		"static/0=0000000000000000000000000000000000000000000000000000000000000000\n" +
-			"update/0=0000000000000000000000000000000000000000000000000000000000000001\n")
-	si := NewStatic()
-	systemImage := pkg.Package{
-		Name:    "system_image",
-		Version: "0",
-	}
-
-	if err := si.LoadFrom(f, systemImage, lotsOf2s); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := si.Set(pkg.Package{Name: "update", Version: "0"}, "0000000000000000000000000000000000000000000000000000000000000002"); err != nil {
-		t.Fatal(err)
-	}
-	if err := si.Set(pkg.Package{Name: "new", Version: "0"}, "0000000000000000000000000000000000000000000000000000000000000003"); err != nil {
-		t.Fatal(err)
-	}
-
-	cases := []struct {
-		root          string
-		hasStaticRoot bool
-	}{
-		{"0000000000000000000000000000000000000000000000000000000000000000", true},
-		{"0000000000000000000000000000000000000000000000000000000000000001", true},
-		{"0000000000000000000000000000000000000000000000000000000000000002", false},
-		{"0000000000000000000000000000000000000000000000000000000000000003", false},
-		{"0000000000000000000000000000000000000000000000000000000000000004", false},
-	}
-	for _, tc := range cases {
-		if got := si.HasStaticRoot(tc.root); got != tc.hasStaticRoot {
-			t.Errorf("static.HasRootName(%q) = %v want %v", tc.root, got, tc.hasStaticRoot)
-		}
-	}
-}
diff --git a/src/sys/pkg/bin/pkgfs/iou/iou.go b/src/sys/pkg/bin/pkgfs/iou/iou.go
deleted file mode 100644
index d50fefa..0000000
--- a/src/sys/pkg/bin/pkgfs/iou/iou.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2019 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !build_with_native_toolchain
-
-package iou
-
-import (
-	"os"
-	"syscall"
-	"syscall/zx/fdio"
-)
-
-// OpenFrom opens a path from a give fdio.Directory, using standard Go flags and
-// mode. See os.OpenFile for flags and mode.
-func OpenFrom(parent *fdio.Directory, path string, flags int, mode uint32) (*os.File, error) {
-	zflags := preprocessFlags(syscall.FdioFlagsToZxio(uint32(flags)), mode)
-
-	f, err := parent.Open(path, zflags, mode)
-	if err != nil {
-		return nil, err
-	}
-	return os.NewFile(uintptr(syscall.OpenFDIO(f)), path), nil
-}
-
-func preprocessFlags(flags uint32, mode uint32) uint32 {
-	flagsIncompatibleWithDirectory := (flags&syscall.FsRightWritable != 0) || (flags&syscall.FsFlagCreate != 0)
-	// Special allowance for Mkdir
-	if (flags == syscall.FsFlagCreate|syscall.FsFlagExclusive|syscall.FsRightReadable|syscall.FsRightWritable) &&
-		(mode&syscall.S_IFDIR != 0) {
-		flagsIncompatibleWithDirectory = false
-	}
-	if (flags&syscall.FsFlagDirectory) == 0 && flagsIncompatibleWithDirectory {
-		flags |= syscall.FsFlagNotDirectory
-	}
-	return flags
-}
diff --git a/src/sys/pkg/bin/pkgfs/meta/pkgsvr.cmx b/src/sys/pkg/bin/pkgfs/meta/pkgsvr.cmx
deleted file mode 100644
index f43f80f..0000000
--- a/src/sys/pkg/bin/pkgfs/meta/pkgsvr.cmx
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-    "program": {
-        "binary": "bin/pkgsvr"
-    },
-    "sandbox": {
-        "services": [
-            "fuchsia.sys.Environment"
-        ]
-    }
-}
diff --git a/src/sys/pkg/bin/pkgfs/meta/pmd_allowlist_test.cmx b/src/sys/pkg/bin/pkgfs/meta/pmd_allowlist_test.cmx
deleted file mode 100644
index 63f044b..0000000
--- a/src/sys/pkg/bin/pkgfs/meta/pmd_allowlist_test.cmx
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-    "include": [
-        "syslog/client.shard.cmx"
-    ],
-    "program": {
-        "binary": "test/pmd_allowlist_test"
-    }
-}
diff --git a/src/sys/pkg/bin/pkgfs/meta/pmd_index_test.cmx b/src/sys/pkg/bin/pkgfs/meta/pmd_index_test.cmx
deleted file mode 100644
index 6d56d12..0000000
--- a/src/sys/pkg/bin/pkgfs/meta/pmd_index_test.cmx
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-    "include": [
-        "syslog/client.shard.cmx"
-    ],
-    "program": {
-        "binary": "test/pmd_index_test"
-    },
-    "sandbox": {
-        "features": [
-            "isolated-temp"
-        ]
-    }
-}
diff --git a/src/sys/pkg/bin/pkgfs/meta/pmd_pkgfs_test.cmx b/src/sys/pkg/bin/pkgfs/meta/pmd_pkgfs_test.cmx
deleted file mode 100644
index 7c401d2..0000000
--- a/src/sys/pkg/bin/pkgfs/meta/pmd_pkgfs_test.cmx
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-    "include": [
-        "syslog/client.shard.cmx"
-    ],
-    "program": {
-        "binary": "test/pmd_pkgfs_test"
-    },
-    "sandbox": {
-        "dev": [
-            "sys/platform/00:00:2d/ramctl"
-        ],
-        "features": [
-            "isolated-temp"
-        ],
-        "services": [
-            "fuchsia.process.Launcher",
-            "fuchsia.tracing.provider.Registry"
-        ]
-    }
-}
diff --git a/src/sys/pkg/bin/pkgfs/pkgfs/ctl_directory.go b/src/sys/pkg/bin/pkgfs/pkgfs/ctl_directory.go
deleted file mode 100644
index be44eb5..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgfs/ctl_directory.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2019 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !build_with_native_toolchain
-
-package pkgfs
-
-import (
-	"log"
-	"strings"
-	"sync"
-	"time"
-
-	"go.fuchsia.dev/fuchsia/src/lib/thinfs/fs"
-)
-
-type ctlDirectory struct {
-	unsupportedDirectory
-	fs   *Filesystem
-	mu   sync.RWMutex
-	dirs map[string]fs.Directory
-}
-
-func (d *ctlDirectory) Dup() (fs.Directory, error) {
-	return d, nil
-}
-
-func (d *ctlDirectory) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	name = clean(name)
-	if name == "" {
-		return nil, d, nil, nil
-	}
-
-	parts := strings.SplitN(name, "/", 2)
-
-	d.mu.RLock()
-	subdir, ok := d.dirs[parts[0]]
-	d.mu.RUnlock()
-	if !ok {
-		return nil, nil, nil, fs.ErrNotFound
-	}
-
-	if len(parts) == 1 {
-		return nil, subdir, nil, nil
-	}
-
-	return subdir.Open(parts[1], flags)
-}
-
-func (d *ctlDirectory) Read() ([]fs.Dirent, error) {
-
-	d.mu.RLock()
-	dirs := make([]fs.Dirent, 0, len(d.dirs))
-	for n := range d.dirs {
-		dirs = append(dirs, dirDirEnt(n))
-	}
-	d.mu.RUnlock()
-	return dirs, nil
-}
-
-func (d *ctlDirectory) Close() error {
-	return nil
-}
-
-func (d *ctlDirectory) Stat() (int64, time.Time, time.Time, error) {
-	return 0, d.fs.mountTime, d.fs.mountTime, nil
-}
-
-func (d *ctlDirectory) Unlink(path string) error {
-	// the "garbage" file is a special control file. When it is unlinked,
-	// we trigger garbage collection.
-	if path == "do-not-use-this-garbage" {
-		if err := d.fs.GC(); err != nil {
-			log.Printf("unlink garbage: %s", err)
-		}
-		return nil
-	}
-
-	return d.unsupportedDirectory.Unlink(path)
-}
-
-func (d *ctlDirectory) Sync() error {
-	return d.fs.blobfs.Sync()
-}
diff --git a/src/sys/pkg/bin/pkgfs/pkgfs/dents.go b/src/sys/pkg/bin/pkgfs/pkgfs/dents.go
deleted file mode 100644
index b2f3fd9..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgfs/dents.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2018 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !build_with_native_toolchain
-
-package pkgfs
-
-import (
-	"go.fuchsia.dev/fuchsia/src/lib/thinfs/fs"
-
-	"fidl/fuchsia/io"
-)
-
-type dirDirEnt string
-
-func (d dirDirEnt) GetType() fs.FileType {
-	return fs.FileTypeDirectory
-}
-
-func (d dirDirEnt) GetIno() uint64 {
-	return io.InoUnknown
-}
-
-func (d dirDirEnt) GetName() string {
-	return string(d)
-}
-
-type fileDirEnt string
-
-func (d fileDirEnt) GetType() fs.FileType {
-	return fs.FileTypeRegularFile
-}
-
-func (d fileDirEnt) GetIno() uint64 {
-	return io.InoUnknown
-}
-
-func (d fileDirEnt) GetName() string {
-	return string(d)
-}
diff --git a/src/sys/pkg/bin/pkgfs/pkgfs/metafar.go b/src/sys/pkg/bin/pkgfs/pkgfs/metafar.go
deleted file mode 100644
index c3f3ad8..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgfs/metafar.go
+++ /dev/null
@@ -1,436 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//go:build !build_with_native_toolchain
-// +build !build_with_native_toolchain
-
-package pkgfs
-
-import (
-	"encoding/binary"
-	"fmt"
-	"path/filepath"
-	"sort"
-	"strings"
-	"time"
-
-	"fidl/fuchsia/io"
-
-	"go.fuchsia.dev/fuchsia/src/lib/thinfs/fs"
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/lib/far/go"
-
-	"syscall"
-	"syscall/zx"
-	"syscall/zx/fdio"
-	zxio "syscall/zx/io"
-)
-
-func newMetaFar(blob string, fs *Filesystem) (*metaFar, error) {
-	mf := &metaFar{
-		blob: blob,
-		fs:   fs,
-	}
-	fr, err := mf.open()
-	if err != nil {
-		return nil, err
-	}
-
-	contents := fr.List()
-	fr.Close()
-
-	mf.contents = contents
-	return mf, nil
-}
-
-// metaFar is a shared reference to a meta.far or one or more of it's contents.
-type metaFar struct {
-	blob     string
-	contents []string
-
-	fs *Filesystem
-}
-
-func (mf *metaFar) open() (*far.Reader, error) {
-	f, err := mf.fs.blobfs.Open(mf.blob)
-	if err != nil {
-		return nil, err
-	}
-
-	fr, err := far.NewReader(f)
-	if err != nil {
-		f.Close()
-	}
-	return fr, err
-}
-
-func (mf *metaFar) list() []string {
-	return mf.contents
-}
-
-// metaFile is the package dir "meta" opened as a file, which on read returns
-// the merkleroot.
-type metaFile struct {
-	unsupportedFile
-
-	*metaFar
-
-	off   int64
-	flags fs.OpenFlags
-}
-
-func newMetaFile(mf *metaFar, flags fs.OpenFlags) *metaFile {
-	return &metaFile{
-		unsupportedFile("package/meta:" + mf.blob),
-		mf,
-		0,
-		flags,
-	}
-}
-
-func (f *metaFile) Close() error {
-	return nil
-}
-
-func (f *metaFile) GetOpenFlags() fs.OpenFlags {
-	return f.flags
-}
-
-func (f *metaFile) Stat() (int64, time.Time, time.Time, error) {
-	return int64(len(f.blob)), time.Time{}, time.Time{}, nil
-}
-
-func (f *metaFile) Read(p []byte, off int64, whence int) (int, error) {
-	if whence != fs.WhenceFromCurrent {
-		return 0, fs.ErrNotSupported
-	}
-	if f.off+off >= int64(len(f.blob)) {
-		return 0, fs.ErrEOF
-	}
-
-	n := copy(p, f.blob[f.off+off:])
-	f.off += off + int64(n)
-	return n, nil
-}
-
-var _ fs.Directory = (*metaFarDir)(nil)
-
-type metaFarDir struct {
-	unsupportedDirectory
-
-	*metaFar
-
-	path string
-}
-
-func newMetaFarDir(mf *metaFar) *metaFarDir {
-	return &metaFarDir{
-		unsupportedDirectory("package/meta:" + mf.blob),
-		mf,
-		"meta",
-	}
-}
-
-func newMetaFarDirAt(mf *metaFar, path string) *metaFarDir {
-	mfd := newMetaFarDir(mf)
-	mfd.path = path
-	return mfd
-}
-
-func (d *metaFarDir) Close() error {
-
-	return nil
-}
-
-func (d *metaFarDir) Dup() (fs.Directory, error) {
-	return d, nil
-}
-
-func (d *metaFarDir) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	name = clean(name)
-
-	// Nothing in the meta directory is ever executable.
-	if flags.Execute() {
-		return nil, nil, nil, fs.ErrNotSupported
-	}
-
-	name = filepath.Join(d.path, name)
-
-	if name == "" {
-		if flags.File() || (!flags.Directory() && !flags.Path()) {
-			return newMetaFile(d.metaFar, flags), nil, nil, nil
-		}
-		return nil, d, nil, nil
-	}
-
-	if flags.Create() || flags.Truncate() || flags.Write() || flags.Append() {
-		return nil, nil, nil, fs.ErrNotSupported
-	}
-
-	contents := d.metaFar.list()
-
-	if n := sort.SearchStrings(contents, name); n != len(contents) && contents[n] == name {
-		mff, err := newMetaFarFile(d.metaFar, name, flags)
-		return mff, nil, nil, err
-	}
-
-	dname := name + "/"
-	for _, lname := range contents {
-		if strings.HasPrefix(lname, dname) {
-			return nil, newMetaFarDirAt(d.metaFar, name), nil, nil
-		}
-	}
-
-	return nil, nil, nil, fs.ErrNotFound
-}
-
-func (d *metaFarDir) Read() ([]fs.Dirent, error) {
-	contents := d.metaFar.list()
-
-	// TODO(raggi): improve efficiency
-	dirs := map[string]struct{}{}
-	dents := []fs.Dirent{}
-	dents = append(dents, dirDirEnt("."))
-
-	for _, name := range contents {
-		if !strings.HasPrefix(name, d.path+"/") {
-			continue
-		}
-		name = strings.TrimPrefix(name, d.path+"/")
-
-		parts := strings.SplitN(name, "/", 2)
-		if len(parts) == 2 {
-			if _, ok := dirs[parts[0]]; !ok {
-				dirs[parts[0]] = struct{}{}
-				dents = append(dents, dirDirEnt(parts[0]))
-			}
-
-		} else {
-			dents = append(dents, fileDirEnt(parts[0]))
-		}
-	}
-	return dents, nil
-}
-
-func (d *metaFarDir) Stat() (int64, time.Time, time.Time, error) {
-	// TODO(raggi): forward stat values from the index
-	contents := d.metaFar.list()
-	return int64(len(contents)), d.fs.mountTime, d.fs.mountTime, nil
-}
-
-var _ fs.File = (*metaFarFile)(nil)
-var _ fs.FileWithBackingMemory = (*metaFarFile)(nil)
-
-type metaFarFile struct {
-	unsupportedFile
-
-	*metaFar
-	fr *far.Reader
-	er *far.EntryReader
-
-	off   int64
-	path  string
-	flags fs.OpenFlags
-	// VMO representing this file's view of the archive.
-	vmo *zx.VMO
-
-	// VMO representing the blob backing this entire archive.
-	// TODO(fxbug.dev/52938): It would be more efficient to cache this VMO for all files
-	// within the same meta far to avoid calling GetBackingMemory() on the same blob
-	// for multiple files.
-	backingBlobVMO *zx.VMO
-}
-
-func newMetaFarFile(mf *metaFar, path string, flags fs.OpenFlags) (*metaFarFile, error) {
-	fr, err := mf.open()
-	if err != nil {
-		return nil, goErrToFSErr(err)
-	}
-	er, err := fr.Open(path)
-	if err != nil {
-		fr.Close()
-		return nil, goErrToFSErr(err)
-	}
-
-	return &metaFarFile{
-		unsupportedFile("package/meta:" + mf.blob + "/" + path),
-		mf,
-		fr,
-		er,
-		0,
-		path,
-		flags,
-		nil,
-		nil,
-	}, nil
-}
-
-func (f *metaFarFile) Close() error {
-	if f.vmo != nil {
-		f.vmo.Close()
-	}
-	if f.backingBlobVMO != nil {
-		f.backingBlobVMO.Close()
-	}
-	f.fr.Close()
-	return nil
-}
-
-func (f *metaFarFile) GetOpenFlags() fs.OpenFlags {
-	return f.flags
-}
-
-func (f *metaFarFile) Dup() (fs.File, error) {
-	fr, err := f.metaFar.open()
-	if err != nil {
-		return nil, goErrToFSErr(err)
-	}
-	er, err := fr.Open(f.path)
-	if err != nil {
-		fr.Close()
-		return nil, goErrToFSErr(err)
-	}
-
-	return &metaFarFile{
-		f.unsupportedFile,
-		f.metaFar,
-		fr,
-		er,
-		0,
-		f.path,
-		f.flags,
-		nil,
-		nil,
-	}, nil
-}
-
-func (f *metaFarFile) Read(p []byte, off int64, whence int) (int, error) {
-	// TODO(raggi): this could allocate less/be far more efficient
-
-	switch whence {
-	case fs.WhenceFromCurrent:
-		f.off += off
-		n, err := f.er.ReadAt(p, f.off)
-		f.off += int64(n)
-		return n, goErrToFSErr(err)
-	case fs.WhenceFromStart:
-		return f.er.ReadAt(p, off)
-	}
-	return 0, fs.ErrNotSupported
-}
-
-func (f *metaFarFile) Seek(offset int64, whence int) (int64, error) {
-	var err error
-	switch whence {
-	case fs.WhenceFromCurrent:
-		f.off = f.off + offset
-	case fs.WhenceFromStart:
-		f.off = offset
-	case fs.WhenceFromEnd:
-		f.off = int64(f.er.Length) + offset
-	default:
-		return 0, fs.ErrInvalidArgs
-	}
-	if err != nil {
-		return f.off, goErrToFSErr(err)
-	}
-	return f.off, nil
-}
-
-func (f *metaFarFile) Stat() (int64, time.Time, time.Time, error) {
-	return int64(f.er.Length), time.Time{}, time.Time{}, nil
-}
-
-func (mf *metaFarFile) getBackingBlobVMO() (*zx.VMO, error) {
-	if mf.backingBlobVMO != nil {
-		return mf.backingBlobVMO, nil
-	}
-	f, err := mf.fs.blobfs.Open(mf.blob)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	fdioFile := syscall.FDIOForFD(int(f.Fd())).(*fdio.File)
-	result, err := fdioFile.GetBackingMemory(zxio.VmoFlagsRead)
-	if err != nil {
-		return nil, err
-	}
-	switch w := result.Which(); w {
-	case zxio.File2GetBackingMemoryResultErr:
-		return nil, &zx.Error{
-			Status: zx.Status(result.Err),
-			Text:   "File.GetBackingMemory",
-		}
-	case zxio.File2GetBackingMemoryResultResponse:
-		mf.backingBlobVMO = &result.Response.Vmo
-		return mf.backingBlobVMO, nil
-	default:
-		panic(fmt.Sprintf("unhandled variant %d", w))
-	}
-}
-
-func (f *metaFarFile) GetBackingMemory(flags io.VmoFlags) (*zx.VMO, uint64, error) {
-	size := f.er.Length
-	if f.vmo == nil {
-		parentVmo, err := f.getBackingBlobVMO()
-		if err != nil {
-			return nil, 0, fs.ErrIO
-		}
-		// All entries in a FAR are at 4096 byte offsets from the start of the
-		// file and are zero padded up to the next 4096 byte boundary:
-		// https://fuchsia.dev/fuchsia-src/concepts/source_code/archive_format#content_chunk
-		offset := f.er.Offset
-		options := zx.VMOChildOption(zx.VMOChildOptionSnapshotAtLeastOnWrite | zx.VMOChildOptionNoWrite)
-
-		vmo, err := parentVmo.CreateChild(options, offset, size)
-		if err != nil {
-			return nil, 0, fs.ErrIO
-		}
-		f.vmo = &vmo
-	}
-
-	rights := zx.RightsBasic | zx.RightMap | zx.RightsProperty
-
-	if flags&io.VmoFlagsRead != 0 {
-		rights |= zx.RightRead
-	}
-	if flags&io.VmoFlagsWrite != 0 {
-		// Contents of a meta directory are never writable.
-		return nil, 0, fs.ErrReadOnly
-	}
-	if flags&io.VmoFlagsExecute != 0 {
-		// Contents of a meta directory are never executable.
-		return nil, 0, fs.ErrPermission
-	}
-
-	if flags&io.VmoFlagsSharedBuffer != 0 {
-		return nil, 0, fs.ErrNotSupported
-	}
-
-	if flags&io.VmoFlagsPrivateClone != 0 {
-		// Create a separate VMO for the caller if they specified that they want a private copy.
-
-		options := zx.VMOChildOption(zx.VMOChildOptionSnapshotAtLeastOnWrite)
-		options |= zx.VMOChildOptionNoWrite
-		offset := uint64(0)
-		child, err := f.vmo.CreateChild(options, offset, size)
-		if err != nil {
-			return nil, 0, fs.ErrIO
-		}
-		return &child, size, nil
-	}
-
-	// Otherwise, just duplicate our VMO.
-	h, err := f.vmo.Handle().Duplicate(rights)
-	if err != nil {
-		return nil, 0, fs.ErrPermission
-	}
-	var sizeBytes [8]byte
-	binary.LittleEndian.PutUint64(sizeBytes[:], size)
-	if err := h.SetProperty(zx.PropVmoContentSize, sizeBytes[:]); err != nil {
-		return nil, 0, err
-	}
-	vmo := zx.VMO(h)
-	return &vmo, size, nil
-}
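The subtle part of this file is the VMO handling in GetBackingMemory: because every FAR entry sits at a 4096-byte-aligned offset within the blob, a read-only copy-on-write child VMO can be carved straight out of the blob's backing VMO. A condensed sketch of just that step, under the assumption that the parent VMO was obtained from blobfs as in getBackingBlobVMO above:

package example

import "syscall/zx"

// newEntryVMO returns a read-only, copy-on-write child VMO covering a single
// FAR entry, given the entry's offset and length within the archive blob.
func newEntryVMO(parent *zx.VMO, offset, size uint64) (*zx.VMO, error) {
	// SnapshotAtLeastOnWrite gives copy-on-write semantics and NoWrite strips
	// write access; contents of a meta directory are never writable.
	options := zx.VMOChildOption(zx.VMOChildOptionSnapshotAtLeastOnWrite | zx.VMOChildOptionNoWrite)
	child, err := parent.CreateChild(options, offset, size)
	if err != nil {
		return nil, err
	}
	return &child, nil
}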
diff --git a/src/sys/pkg/bin/pkgfs/pkgfs/needs_directory.go b/src/sys/pkg/bin/pkgfs/pkgfs/needs_directory.go
deleted file mode 100644
index f9144e18..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgfs/needs_directory.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2018 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !build_with_native_toolchain
-
-package pkgfs
-
-import (
-	"strings"
-	"time"
-
-	"go.fuchsia.dev/fuchsia/src/lib/thinfs/fs"
-)
-
-// needsRoot presents the following tree:
-//  /pkgfs/needs/packages/$PACKAGE_HASH/$BLOB_HASH
-// the files are "needsFile" vnodes, so they're writable to blobfs.
-type needsRoot struct {
-	unsupportedDirectory
-
-	fs *Filesystem
-}
-
-func (d *needsRoot) Dup() (fs.Directory, error) {
-	return d, nil
-}
-
-func (d *needsRoot) Close() error {
-	return nil
-}
-
-func (d *needsRoot) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	name = clean(name)
-	if name == "" {
-		return nil, d, nil, nil
-	}
-
-	parts := strings.SplitN(name, "/", 2)
-
-	switch parts[0] {
-	case "packages":
-		npr := &needsPkgRoot{unsupportedDirectory: unsupportedDirectory("/needs/packages"), fs: d.fs}
-		if len(parts) > 1 {
-			return npr.Open(parts[1], flags)
-		}
-		return nil, npr, nil, nil
-
-	default:
-		if len(parts) != 1 || flags.Create() {
-			return nil, nil, nil, fs.ErrNotSupported
-		}
-
-		return nil, nil, nil, fs.ErrNotFound
-	}
-}
-
-func (d *needsRoot) Read() ([]fs.Dirent, error) {
-	return []fs.Dirent{dirDirEnt("packages")}, nil
-}
-
-func (d *needsRoot) Stat() (int64, time.Time, time.Time, error) {
-	// TODO(raggi): provide more useful values
-	return 0, d.fs.mountTime, d.fs.mountTime, nil
-}
-
-// needsPkgRoot serves a directory that indexes the blobs needed to fulfill a
-// package that is presently part way through caching.
-type needsPkgRoot struct {
-	unsupportedDirectory
-
-	fs *Filesystem
-}
-
-func (d *needsPkgRoot) Dup() (fs.Directory, error) {
-	return d, nil
-}
-
-func (d *needsPkgRoot) Close() error {
-	return nil
-}
-
-func (d *needsPkgRoot) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	name = clean(name)
-	if name == "" {
-		return nil, d, nil, nil
-	}
-
-	parts := strings.SplitN(name, "/", 2)
-
-	root := parts[0]
-
-	if !d.fs.index.IsInstalling(root) {
-		return nil, nil, nil, fs.ErrNotFound
-	}
-
-	pkgDir := &needsPkgDir{fs: d.fs, pkgRoot: root}
-
-	if len(parts) > 1 {
-		return pkgDir.Open(parts[1], flags)
-	}
-
-	return nil, pkgDir, nil, nil
-}
-
-func (d *needsPkgRoot) Read() ([]fs.Dirent, error) {
-	blobs := d.fs.index.InstallingList()
-	dirents := make([]fs.Dirent, len(blobs))
-	for i := range blobs {
-		dirents[i] = fileDirEnt(blobs[i])
-	}
-	return dirents, nil
-}
-
-func (d *needsPkgRoot) Stat() (int64, time.Time, time.Time, error) {
-	// TODO(raggi): provide more useful values
-	return 0, d.fs.mountTime, d.fs.mountTime, nil
-}
-
-// needsPkgDir serves a directory that indexes the blobs needed to fulfill a
-// package that is presently part way through caching.
-type needsPkgDir struct {
-	unsupportedDirectory
-
-	fs *Filesystem
-
-	pkgRoot string
-}
-
-func (d *needsPkgDir) Dup() (fs.Directory, error) {
-	return d, nil
-}
-
-func (d *needsPkgDir) Close() error {
-	return nil
-}
-
-func (d *needsPkgDir) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	name = clean(name)
-	if name == "" {
-		return nil, d, nil, nil
-	}
-
-	if strings.Contains(name, "/") {
-		return nil, nil, nil, fs.ErrNotFound
-	}
-
-	if !d.fs.index.PkgHasNeed(d.pkgRoot, name) {
-		return nil, nil, nil, fs.ErrNotFound
-	}
-
-	return &installFile{fs: d.fs, name: name, isPkg: false}, nil, nil, nil
-}
-
-func (d *needsPkgDir) Read() ([]fs.Dirent, error) {
-	names := d.fs.index.PkgNeedsList(d.pkgRoot)
-	dirents := make([]fs.Dirent, len(names))
-	for i := range names {
-		dirents[i] = fileDirEnt(names[i])
-	}
-	return dirents, nil
-}
-
-func (d *needsPkgDir) Stat() (int64, time.Time, time.Time, error) {
-	// TODO(raggi): provide more useful values
-	return 0, d.fs.mountTime, d.fs.mountTime, nil
-}
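From a client's point of view this tree was a polling surface: each entry under needs/packages/<merkle> named a content blob not yet present in blobfs. A minimal sketch, assuming pkgfs was mounted at /pkgfs and given the merkle root of a package that is mid-installation:

package example

import (
	"io/ioutil"
	"path/filepath"
)

// listNeeds returns the content blobs still missing for a package that is
// part way through installation, by reading the directory served by
// needsPkgDir above.
func listNeeds(pkgMerkle string) ([]string, error) {
	entries, err := ioutil.ReadDir(filepath.Join("/pkgfs/needs/packages", pkgMerkle))
	if err != nil {
		return nil, err
	}
	var needs []string
	for _, e := range entries {
		needs = append(needs, e.Name())
	}
	return needs, nil
}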
diff --git a/src/sys/pkg/bin/pkgfs/pkgfs/package_directory.go b/src/sys/pkg/bin/pkgfs/pkgfs/package_directory.go
deleted file mode 100644
index 7841323..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgfs/package_directory.go
+++ /dev/null
@@ -1,281 +0,0 @@
-// Copyright 2018 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !build_with_native_toolchain
-
-package pkgfs
-
-import (
-	"bytes"
-	"encoding/json"
-	"log"
-	"os"
-	"path/filepath"
-	"strings"
-	"time"
-
-	"go.fuchsia.dev/fuchsia/src/lib/thinfs/fs"
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pm/pkg"
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/lib/far/go"
-)
-
-type packagedItem struct {
-	blobId     string
-	executable bool
-}
-
-type packageDir struct {
-	unsupportedDirectory
-	fs         *Filesystem
-	merkleroot string
-	contents   map[string]packagedItem
-	executable bool
-
-	// if this packagedir is a subdirectory, then this is the prefix name
-	subdir *string
-}
-
-func newPackageDir(name, version string, filesystem *Filesystem, executable bool) (*packageDir, error) {
-	var merkleroot string
-	var foundInStatic bool
-	p := pkg.Package{Name: name, Version: version}
-	if filesystem.static != nil {
-		merkleroot, foundInStatic = filesystem.static.Get(p)
-	}
-
-	if !foundInStatic {
-		var found bool
-		merkleroot, found = filesystem.index.Get(p)
-		if !found {
-			return nil, fs.ErrNotFound
-		}
-	}
-
-	return newPackageDirFromBlob(merkleroot, filesystem, executable)
-}
-
-func isExecutablePath(path string) bool {
-	// TODO(fxbug.dev/37328): try limiting this to just lib/, bin/, and test/
-	// prefixes?  Or put explicit bits for each file in the manifest.
-	return true
-}
-
-// Initialize a package directory server interface from a package meta.far
-func newPackageDirFromBlob(blob string, filesystem *Filesystem, executable bool) (*packageDir, error) {
-	f, err := filesystem.blobfs.Open(blob)
-	if err != nil {
-		if !os.IsNotExist(err) {
-			log.Printf("pkgfs: failed to open package contents at %q: %s", blob, err)
-		}
-		return nil, goErrToFSErr(err)
-	}
-	defer f.Close()
-
-	fr, err := far.NewReader(f)
-	if err != nil {
-		log.Printf("pkgfs: failed to read meta.far at %q: %s", blob, err)
-		return nil, goErrToFSErr(err)
-	}
-
-	buf, err := fr.ReadFile("meta/package")
-	if err != nil {
-		log.Printf("pkgfs: failed to read meta/package from %q: %s", blob, err)
-		return nil, goErrToFSErr(err)
-	}
-	var p pkg.Package
-	if err := json.Unmarshal(buf, &p); err != nil {
-		log.Printf("pkgfs: failed to parse meta/package from %q: %s", blob, err)
-		return nil, goErrToFSErr(err)
-	}
-
-	buf, err = fr.ReadFile("meta/contents")
-	if err != nil {
-		log.Printf("pkgfs: failed to read meta/contents from %q: %s", blob, err)
-		return nil, goErrToFSErr(err)
-	}
-
-	pd := packageDir{
-		unsupportedDirectory: unsupportedDirectory("package:" + blob),
-		merkleroot:           blob,
-		fs:                   filesystem,
-		contents:             map[string]packagedItem{},
-		executable:           executable,
-	}
-
-	lines := bytes.Split(buf, []byte("\n"))
-
-	for _, line := range lines {
-		line = bytes.TrimSpace(line)
-		if len(line) == 0 {
-			continue
-		}
-		parts := bytes.SplitN(line, []byte("="), 2)
-		if len(parts) != 2 {
-			log.Printf("pkgfs: bad contents line: %v", line)
-			continue
-		}
-		path := string(parts[0])
-		pd.contents[path] = packagedItem{
-			blobId:     string(parts[1]),
-			executable: isExecutablePath(path),
-		}
-	}
-	if err != nil {
-		return nil, goErrToFSErr(err)
-	}
-
-	pd.contents["meta"] = packagedItem{
-		blobId:     blob,
-		executable: false,
-	}
-	for _, name := range fr.List() {
-		if !strings.HasPrefix(name, "meta/") {
-			log.Printf("package:%s illegal file in meta.far: %q", pd.merkleroot, name)
-			continue
-		}
-		pd.contents[name] = packagedItem{
-			blobId:     name,
-			executable: false,
-		}
-	}
-
-	return &pd, nil
-}
-
-func (d *packageDir) Close() error {
-	return nil
-}
-
-func (d *packageDir) Dup() (fs.Directory, error) {
-	return d, nil
-}
-
-func (d *packageDir) getBlobFor(path string) (string, bool) {
-	root, ok := d.contents[path]
-	return root.blobId, ok
-}
-
-func (d *packageDir) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	name = clean(name)
-
-	if d.subdir != nil {
-		name = filepath.Join(*d.subdir, name)
-	}
-
-	if name == "" {
-		return nil, d, nil, nil
-	}
-
-	if flags.Create() || flags.Truncate() || flags.Write() || flags.Append() {
-		return nil, nil, nil, fs.ErrNotSupported
-	}
-
-	if name == "meta" {
-		mf, err := newMetaFar(d.contents[name].blobId, d.fs)
-		if err != nil {
-			return nil, nil, nil, err
-		}
-
-		if flags.File() || (!flags.Directory() && !flags.Path()) {
-			mff := newMetaFile(mf, flags)
-			return mff, nil, nil, nil
-		}
-		mfd := newMetaFarDir(mf)
-		return nil, mfd, nil, nil
-	}
-
-	if strings.HasPrefix(name, "meta/") {
-		mf, err := newMetaFar(d.contents["meta"].blobId, d.fs)
-		if err != nil {
-			return nil, nil, nil, err
-		}
-
-		mfd := newMetaFarDir(mf)
-		return mfd.Open(strings.TrimPrefix(name, "meta"), flags)
-	}
-
-	if root, ok := d.contents[name]; ok {
-		if flags.Execute() {
-			if !root.executable {
-				return nil, nil, nil, fs.ErrPermission
-			}
-
-			// TODO(fxbug.dev/48930) Remove this temporary feature when possible.
-			if !d.executable && d.fs.enforceNonBaseExecutabilityRestrictions {
-				log.Printf("pkgfs: attempted executable open of %s. This is not allowed due to executability restrictions in pkgfs. See fxbug.dev/48902", name)
-				return nil, nil, nil, fs.ErrPermission
-			}
-		}
-		return nil, nil, &fs.Remote{Channel: d.fs.blobfs.Channel(), Path: root.blobId, Flags: flags}, nil
-
-	}
-
-	dirname := name + "/"
-	for k := range d.contents {
-		if strings.HasPrefix(k, dirname) {
-			// subdir is a copy of d, but with subdir set
-			subdir := *d
-			subdir.subdir = &dirname
-			return nil, &subdir, nil, nil
-		}
-	}
-
-	return nil, nil, nil, fs.ErrNotFound
-}
-
-func (d *packageDir) Read() ([]fs.Dirent, error) {
-	// TODO(raggi): improve efficiency
-	dirs := map[string]struct{}{}
-	dents := []fs.Dirent{}
-	dents = append(dents, dirDirEnt("."))
-
-	if d.subdir == nil {
-		dirs["meta"] = struct{}{}
-		dents = append(dents, dirDirEnt("meta"))
-	}
-
-	for name := range d.contents {
-		if d.subdir != nil {
-			if !strings.HasPrefix(name, *d.subdir) {
-				continue
-			}
-			name = strings.TrimPrefix(name, *d.subdir)
-		}
-
-		parts := strings.SplitN(name, "/", 2)
-		if len(parts) == 2 {
-			if _, ok := dirs[parts[0]]; !ok {
-				dirs[parts[0]] = struct{}{}
-				dents = append(dents, dirDirEnt(parts[0]))
-			}
-
-		} else {
-			// TODO(fxbug.dev/22014): fix the potential for discrepancies here.
-			// Most of the time there are no pointers in contents for dirs, but the
-			// exception is the meta pointer, which this would mistake for a file, so
-			// we must check for a name collision here too.
-			if _, ok := dirs[parts[0]]; !ok {
-				dents = append(dents, fileDirEnt(parts[0]))
-			}
-		}
-	}
-	return dents, nil
-}
-
-func (d *packageDir) Stat() (int64, time.Time, time.Time, error) {
-	// TODO(raggi): forward stat values from the index
-	return 0, d.fs.mountTime, d.fs.mountTime, nil
-}
-
-func (d *packageDir) Blobs() []string {
-	// TODO(fxbug.dev/22235) consider preallocation which would over-allocate, but cause less thrash
-	blobs := []string{}
-	for path, blob := range d.contents {
-		if strings.HasPrefix(path, "meta/") {
-			continue
-		}
-		blobs = append(blobs, blob.blobId)
-	}
-	return blobs
-}
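newPackageDirFromBlob above builds its contents map from two sources: meta/contents lines of the form path=merkleroot, plus the meta/ entries of the archive itself. A small host-side sketch of just the meta/contents parsing (same split and same skip-on-bad-line behavior):

package example

import (
	"bufio"
	"io"
	"log"
	"strings"
)

// parseContents parses a meta/contents stream of "path=merkleroot" lines into
// a path -> merkleroot map, skipping blank and malformed lines as
// newPackageDirFromBlob does.
func parseContents(r io.Reader) (map[string]string, error) {
	contents := map[string]string{}
	s := bufio.NewScanner(r)
	for s.Scan() {
		line := strings.TrimSpace(s.Text())
		if line == "" {
			continue
		}
		parts := strings.SplitN(line, "=", 2)
		if len(parts) != 2 {
			log.Printf("bad contents line: %q", line)
			continue
		}
		contents[parts[0]] = parts[1]
	}
	return contents, s.Err()
}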
diff --git a/src/sys/pkg/bin/pkgfs/pkgfs/package_install_directory.go b/src/sys/pkg/bin/pkgfs/pkgfs/package_install_directory.go
deleted file mode 100644
index 03895bb..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgfs/package_install_directory.go
+++ /dev/null
@@ -1,438 +0,0 @@
-// Copyright 2018 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//go:build !build_with_native_toolchain
-// +build !build_with_native_toolchain
-
-package pkgfs
-
-import (
-	"bytes"
-	"encoding/json"
-	"log"
-	"os"
-	"regexp"
-	"strings"
-	"syscall/zx"
-	"time"
-
-	"go.fuchsia.dev/fuchsia/src/lib/thinfs/fs"
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pm/pkg"
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/lib/far/go"
-)
-
-const (
-	identityBlob = "15ec7bf0b50732b49f8228e07d24365338f9e3ab994b00af08e5a3bffe55fd8b"
-)
-
-var merklePat = regexp.MustCompile("^[0-9a-f]{64}$")
-
-/*
-The following VNodes are defined in this file:
-
-/install           - installDir
-/install/pkg       - installPkgDir
-/install/pkg/{f}   - installFile{isPkg:true}
-/install/blob      - installBlobDir
-/install/blob/{f}  - installFile{isPkg:false}
-*/
-
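This write side of the tree is what pkg-cache and the resolver replace. A condensed sketch of the client flow that the tests at the end of this change exercise (pkgfs assumed mounted at /pkgfs; merkles, paths, and sizes are illustrative): write the meta.far to install/pkg/<merkle>, read needs/packages/<merkle> for the missing blobs, then write each one to install/blob/<merkle>, truncating to the blob's size before copying.

package example

import (
	"io"
	"os"
	"path/filepath"
)

// installBlob writes one blob through the pkgfs install interface: open
// <installDir>/<merkle> with O_CREATE, truncate to the blob's size so blobfs
// can allocate and verify it, then stream the bytes. Re-opening a merkle that
// is already complete fails with EEXIST (see installFile.open below).
func installBlob(installDir, merkle, srcPath string, size int64) error {
	src, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := os.OpenFile(filepath.Join(installDir, merkle), os.O_RDWR|os.O_CREATE, 0o700)
	if err != nil {
		return err
	}
	defer dst.Close()

	if err := dst.Truncate(size); err != nil {
		return err
	}
	_, err = io.Copy(dst, src)
	return err
}

// Usage, with illustrative values:
//   installBlob("/pkgfs/install/pkg", metaFarMerkle, "meta.far", metaFarSize)
//   // then, for each entry under /pkgfs/needs/packages/<metaFarMerkle>:
//   installBlob("/pkgfs/install/blob", blobMerkle, blobPath, blobSize)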
-// installDir is located at /install.
-type installDir struct {
-	unsupportedDirectory
-
-	fs *Filesystem
-}
-
-func (d *installDir) Dup() (fs.Directory, error) {
-	return d, nil
-}
-
-func (d *installDir) Stat() (int64, time.Time, time.Time, error) {
-	return 0, d.fs.mountTime, d.fs.mountTime, nil
-}
-
-func (d *installDir) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	name = clean(name)
-	if name == "" {
-		return nil, d, nil, nil
-	}
-
-	parts := strings.SplitN(name, "/", 2)
-
-	var nd fs.Directory
-	switch parts[0] {
-	case "pkg":
-		nd = &installPkgDir{fs: d.fs}
-	case "blob":
-		nd = &installBlobDir{fs: d.fs}
-	default:
-		return nil, nil, nil, fs.ErrNotFound
-	}
-
-	if len(parts) == 1 {
-		if flags.Directory() {
-			return nil, nd, nil, nil
-		}
-		return nil, nd, nil, fs.ErrNotSupported
-	}
-
-	return nd.Open(parts[1], flags)
-}
-
-func (d *installDir) Read() ([]fs.Dirent, error) {
-	return []fs.Dirent{dirDirEnt("pkg"), dirDirEnt("blob")}, nil
-}
-
-func (d *installDir) Close() error {
-	return nil
-}
-
-// installPkgDir is located at /install/pkg
-type installPkgDir struct {
-	unsupportedDirectory
-
-	fs *Filesystem
-}
-
-func (d *installPkgDir) Dup() (fs.Directory, error) {
-	return d, nil
-}
-
-func (d *installPkgDir) Stat() (int64, time.Time, time.Time, error) {
-	return 0, d.fs.mountTime, d.fs.mountTime, nil
-}
-
-func (d *installPkgDir) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	name = clean(name)
-	if name == "" {
-		return nil, d, nil, nil
-	}
-
-	if !flags.Create() {
-		return nil, nil, nil, fs.ErrNotSupported
-	}
-
-	f := &installFile{fs: d.fs, name: name, isPkg: true}
-	err := f.open()
-	return f, nil, nil, err
-}
-
-func (d *installPkgDir) Read() ([]fs.Dirent, error) {
-	return []fs.Dirent{}, nil
-}
-
-func (d *installPkgDir) Close() error {
-	return nil
-}
-
-// installBlobDir is located at /install/blob
-type installBlobDir struct {
-	unsupportedDirectory
-
-	fs *Filesystem
-}
-
-func (d *installBlobDir) Dup() (fs.Directory, error) {
-	return d, nil
-}
-
-func (d *installBlobDir) Stat() (int64, time.Time, time.Time, error) {
-	return 0, d.fs.mountTime, d.fs.mountTime, nil
-}
-
-func (d *installBlobDir) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	name = clean(name)
-	if name == "" {
-		return nil, d, nil, nil
-	}
-
-	// TODO(raggi): support write resumption..
-
-	if !flags.Create() {
-		return nil, nil, nil, fs.ErrNotSupported
-	}
-
-	f := &installFile{fs: d.fs, name: name, isPkg: false}
-	var err error
-	if !flags.Path() {
-		err = f.open()
-	}
-	return f, nil, nil, err
-}
-
-func (d *installBlobDir) Read() ([]fs.Dirent, error) {
-	return []fs.Dirent{}, nil
-}
-
-func (d *installBlobDir) Close() error {
-	return nil
-}
-
-type installFile struct {
-	unsupportedFile
-	fs *Filesystem
-
-	isPkg bool
-
-	size    uint64
-	written uint64
-	name    string
-
-	blob *os.File
-}
-
-func (f *installFile) open() error {
-	if !merklePat.Match([]byte(f.name)) {
-		return fs.ErrInvalidArgs
-	}
-
-	var err error
-
-	// TODO(raggi): propagate flags instead to allow for resumption and so on
-	f.blob, err = f.fs.blobfs.OpenFile(f.name, os.O_WRONLY|os.O_CREATE, 0777)
-
-	// When opening a blob for write, blobfs returns a permission error if
-	// the blob is in the process of being written or already exists. If we
-	// can confirm the blob is readable, report to the caller that it
-	// already exists. Otherwise, bubble the error to the caller without
-	// fulfilling the need.
-	if os.IsPermission(err) {
-		if !f.fs.blobfs.HasBlob(f.name) {
-			return goErrToFSErr(err)
-		}
-
-		// "Fulfill" any needs against the blob that was attempted to be written.
-		f.fs.index.Fulfill(f.name)
-
-		if f.isPkg {
-			// Importing a package file that is already present in blobfs could fail for
-			// a number of reasons, such as the package being invalid, and so on. We need
-			// to report such cases back to the caller. In the case where we import fine,
-			// we must fall through to passing ErrAlreadyExists back to the caller, so
-			// that they know that the package file itself is already complete. Getting
-			// `fs.ErrAlreadyExists` on a package meta.far write does not in and of
-			// itself indicate that the whole package is present, only that the package
-			// metadata blob is present.
-			if err := f.importPackage(); err != nil {
-				return err
-			}
-		}
-		return fs.ErrAlreadyExists
-	}
-
-	if err != nil {
-		return goErrToFSErr(err)
-	}
-
-	if f.isPkg {
-		f.fs.index.Installing(f.name)
-	}
-	return nil
-}
-
-func (f *installFile) Write(p []byte, off int64, whence int) (int, error) {
-	if whence != fs.WhenceFromCurrent || off != 0 {
-		return 0, goErrToFSErr(&zx.Error{Status: zx.ErrNotSupported})
-	}
-
-	// It is illegal to write past the truncated size of a blob.
-	if f.written > f.size {
-		return 0, goErrToFSErr(&zx.Error{Status: zx.ErrInvalidArgs})
-	}
-
-	n, err := f.blob.Write(p)
-	f.written += uint64(n)
-
-	if f.written >= f.size && err == nil {
-		// "Fulfill" any needs against the blob that was attempted to be written.
-		f.fs.index.Fulfill(f.name)
-
-		if f.isPkg {
-			// If a package installation fails, the error is returned here.
-			return n, goErrToFSErr(f.importPackage())
-		}
-	}
-
-	return n, goErrToFSErr(err)
-}
-
-func (f *installFile) Close() error {
-	if f.blob == nil {
-		return nil
-	}
-
-	if err := f.blob.Close(); err != nil {
-		log.Printf("error closing file: %s\n", err)
-		return goErrToFSErr(err)
-	}
-
-	return nil
-}
-
-func (f *installFile) Stat() (int64, time.Time, time.Time, error) {
-	return int64(f.written), time.Time{}, time.Time{}, nil
-}
-
-func (f *installFile) Truncate(sz uint64) error {
-	var err error
-
-	f.size = sz
-	err = f.blob.Truncate(int64(f.size))
-
-	if f.size == 0 && f.name == identityBlob && err == nil {
-		// Fulfill any needs against the identity blob
-		f.fs.index.Fulfill(f.name)
-	}
-
-	return goErrToFSErr(err)
-}
-
-// importPackage uses f.name to import the package that was just written. It
-// returns an fs.Error to report back to the user at write time.
-func (f *installFile) importPackage() error {
-	return importPackage(f.fs, f.name)
-}
-
-// importPackage uses name to import the package that was just written. It
-// returns an fs.Error to report back to the user at write time.
-func importPackage(f *Filesystem, name string) error {
-	b, err := f.blobfs.Open(name)
-	if err != nil {
-		f.index.InstallingFailedForPackage(name)
-		log.Printf("error opening package blob after writing: %s: %s", name, err)
-		return fs.ErrFailedPrecondition
-	}
-	defer b.Close()
-
-	r, err := far.NewReader(b)
-	if err != nil {
-		f.index.InstallingFailedForPackage(name)
-		log.Printf("error reading package archive: %s", err)
-		// Note: translates to zx.ErrBadState
-		return fs.ErrFailedPrecondition
-	}
-
-	pf, err := r.ReadFile("meta/package")
-	if err != nil {
-		f.index.InstallingFailedForPackage(name)
-		log.Printf("error reading package metadata: %s", err)
-		// Note: translates to zx.ErrBadState
-		return fs.ErrFailedPrecondition
-	}
-
-	var p pkg.Package
-	err = json.Unmarshal(pf, &p)
-	if err != nil {
-		f.index.InstallingFailedForPackage(name)
-		log.Printf("error parsing package metadata: %s", err)
-		// Note: translates to zx.ErrBadState
-		return fs.ErrFailedPrecondition
-	}
-
-	if err := p.Validate(); err != nil {
-		f.index.InstallingFailedForPackage(name)
-		log.Printf("package is invalid: %s", err)
-		// Note: translates to zx.ErrBadState
-		return fs.ErrFailedPrecondition
-	}
-
-	// Tell the index the identity of this package.
-	f.index.UpdateInstalling(name, p)
-
-	contents, err := r.ReadFile("meta/contents")
-	if err != nil {
-		log.Printf("error parsing package contents file for %s: %s", p, err)
-		// Note: translates to zx.ErrBadState
-		return fs.ErrFailedPrecondition
-	}
-
-	files := bytes.Split(contents, []byte{'\n'})
-
-	// Note: the following heuristic is coarse and not easy to compute. For small
-	// packages enumerating all blobs in blobfs will be slow, but for very large
-	// packages it's more likely that polling blobfs the expensive way for missing
-	// blobs is going to be expensive. This can be improved by blobfs providing an
-	// API to handle this case of "which of the following blobs are already
-	// readable"
-	mayHaveBlob := func(root string) bool { return true }
-	if len(files) > 20 {
-		dnames, err := f.blobfs.Blobs()
-		if err != nil {
-			log.Printf("error readdir blobfs: %s", err)
-			// Note: translates to zx.ErrBadState
-			return fs.ErrFailedPrecondition
-		}
-		names := map[string]struct{}{}
-		for _, name := range dnames {
-			names[name] = struct{}{}
-		}
-		mayHaveBlob = func(root string) bool {
-			_, found := names[root]
-			return found
-		}
-	}
-
-	needBlobs := make(map[string]struct{})
-	foundBlobs := make(map[string]struct{})
-	needsCount := 0
-	for i := range files {
-		// Silence apparent errors from last line in file/empty lines.
-		if len(files[i]) == 0 {
-			continue
-		}
-		parts := bytes.SplitN(files[i], []byte{'='}, 2)
-		if len(parts) != 2 {
-			log.Printf("skipping bad package entry: %q", files[i])
-			continue
-		}
-		root := string(parts[1])
-
-		if mayHaveBlob(root) && f.blobfs.HasBlob(root) {
-			foundBlobs[root] = struct{}{}
-			continue
-		}
-
-		needsCount++
-		needBlobs[root] = struct{}{}
-	}
-
-	// NOTE: the EEXIST returned here is sometimes not strictly "this package was
-	// already activated", as the package may have just been activated during the
-	// above loop that calls fulfill with each blob that is found that already
-	// exists. Doing otherwise would significantly harm performance, as it would
-	// require a strong consistency rather than an eventual consistency model, that
-	// requires global locking of the filesystem. What this means in the not
-	// entirely correct case is either:
-	// a) we raced another process that was fulfilling the same needs as this
-	//    package.
-	// b) all of the content was already on the system, but the package was missing
-	//    from the index.
-	// This state could be improved if there was an in-memory precomputed index of
-	// all active meta.far blobs on the system.
-	if needsCount == 0 {
-		// It is possible that we already had all of the content for a package at the
-		// time when importPackage starts, for example if a package is updated and
-		// then reverted to a prior version without GC. In that case, we should still
-		// activate the package, even though there is nothing to fulfill.
-		f.index.Add(p, name)
-		return fs.ErrAlreadyExists
-	}
-
-	// We tell the index about needs that we have which were explicitly not found
-	// on the system.
-	err = goErrToFSErr(f.index.AddNeeds(name, needBlobs))
-
-	// In order to ensure eventual consistency in the case where multiple processes
-	// are racing on these needs, we must re-publish all of those fulfillments
-	// after publishing the locally discovered needs.
-	for blob := range foundBlobs {
-		f.index.Fulfill(blob)
-	}
-
-	// AddNeeds may return os.ErrExist if the package activation won the race
-	// between our needs check loop above, and the following registration of the
-	// packages needs.
-	return err
-}
diff --git a/src/sys/pkg/bin/pkgfs/pkgfs/package_list_directories.go b/src/sys/pkg/bin/pkgfs/pkgfs/package_list_directories.go
deleted file mode 100644
index 91c5801..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgfs/package_list_directories.go
+++ /dev/null
@@ -1,208 +0,0 @@
-// Copyright 2018 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !build_with_native_toolchain
-
-package pkgfs
-
-import (
-	"log"
-	"path/filepath"
-	"strings"
-	"time"
-
-	"go.fuchsia.dev/fuchsia/src/lib/thinfs/fs"
-)
-
-type packagesRoot struct {
-	unsupportedDirectory
-
-	fs                        *Filesystem
-	enforceNonStaticAllowlist bool
-}
-
-func (pr *packagesRoot) isAllowedNonStaticPackage(packageName string) bool {
-	if pr.fs.allowedNonStaticPackages == nil {
-		return false
-	}
-
-	return pr.fs.allowedNonStaticPackages.Contains(packageName)
-}
-
-func dynamicIndexHasPackageName(f *Filesystem, name string) bool {
-	pkgs := f.index.List()
-	found := false
-	for _, p := range pkgs {
-		if p.Name == name {
-			found = true
-			break
-		}
-	}
-
-	return found
-}
-
-func (pr *packagesRoot) Dup() (fs.Directory, error) {
-	return pr, nil
-}
-
-func (pr *packagesRoot) Close() error { return nil }
-
-func (pr *packagesRoot) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	name = clean(name)
-
-	if name == "" {
-		return nil, pr, nil, nil
-	}
-
-	parts := strings.Split(name, "/")
-
-	packageName := parts[0]
-
-	// Check for /pkgfs/packages visibility restrictions - we only want to show base packages, and packages on an allowlist
-	if pr.fs.static == nil || (pr.fs.static != nil && !pr.fs.static.HasName(packageName)) {
-		// This package isn't in the static index
-
-		if !pr.isAllowedNonStaticPackage(packageName) {
-			// This package isn't allowed by the allowlist to show up
-			// Check if the package is currently on the disk
-			// If it's not on the disk at all, we'll return ErrNotFound later, so do nothing special here
-
-			if dynamicIndexHasPackageName(pr.fs, packageName) {
-				// This package is on the system (indicated by presence in the _dynamic_ index),
-				// but we don't want it to be accessed. Log an error
-				log.Printf("pkgfs: attempted open of non-static package %s from /pkgfs/packages, which is deprecated. Please use the package resolver for accessing package directories, or /pkg to access your own package. See fxbug.dev/44527. Full path: %s", packageName, name)
-
-				if pr.enforceNonStaticAllowlist {
-					return nil, nil, nil, fs.ErrPermission
-				}
-			}
-		}
-	}
-
-	pld, err := newPackageListDir(packageName, pr.fs)
-	if err != nil {
-		// If the package isn't on the system at all, we'll return an error here
-		return nil, nil, nil, err
-	}
-
-	if len(parts) > 1 {
-		// We were asked for a specific variant, so return a package directory itself, not just the list of variants
-		file, directory, remote, err := pld.Open(filepath.Join(parts[1:]...), flags)
-		return file, directory, remote, err
-	}
-
-	// Return the list of variants
-	return nil, pld, nil, nil
-}
-
-func (pr *packagesRoot) Read() ([]fs.Dirent, error) {
-	var names = map[string]struct{}{}
-	if pr.fs.static != nil {
-		pkgs, err := pr.fs.static.List()
-		if err != nil {
-			return nil, err
-		}
-		for _, p := range pkgs {
-			names[p.Name] = struct{}{}
-		}
-	}
-
-	pkgs := pr.fs.index.List()
-	for _, p := range pkgs {
-		// Check visibility restrictions - only return non-base packages on an allowlist
-		if pr.enforceNonStaticAllowlist && !pr.isAllowedNonStaticPackage(p.Name) {
-			continue
-		}
-		names[p.Name] = struct{}{}
-	}
-
-	dirents := make([]fs.Dirent, 0, len(names))
-	for name := range names {
-		dirents = append(dirents, dirDirEnt(name))
-	}
-	return dirents, nil
-}
-
-func (pr *packagesRoot) Stat() (int64, time.Time, time.Time, error) {
-	// TODO(raggi): stat the index directory and pass on info
-	return 0, pr.fs.mountTime, pr.fs.mountTime, nil
-}
-
-// packageListDir is a directory in the pkgfs packages directory for an
-// individual package that lists all versions of packages
-type packageListDir struct {
-	unsupportedDirectory
-	fs          *Filesystem
-	packageName string
-}
-
-func newPackageListDir(name string, f *Filesystem) (*packageListDir, error) {
-	if !f.static.HasName(name) {
-		if !dynamicIndexHasPackageName(f, name) {
-			return nil, fs.ErrNotFound
-		}
-	}
-
-	pld := packageListDir{
-		unsupportedDirectory: unsupportedDirectory(filepath.Join("/packages", name)),
-		fs:                   f,
-		packageName:          name,
-	}
-	return &pld, nil
-}
-
-func (pld *packageListDir) Dup() (fs.Directory, error) {
-	return pld, nil
-}
-
-func (pld *packageListDir) Close() error {
-	return nil
-}
-
-func (pld *packageListDir) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	name = clean(name)
-
-	if name == "" {
-		return nil, pld, nil, nil
-	}
-
-	parts := strings.Split(name, "/")
-
-	d, err := newPackageDir(pld.packageName, parts[0], pld.fs, pld.fs.shouldAllowExecutableOpenByPackageName(pld.packageName))
-	if err != nil {
-		return nil, nil, nil, err
-	}
-
-	if len(parts) > 1 {
-		return d.Open(filepath.Join(parts[1:]...), flags)
-	}
-	return nil, d, nil, nil
-}
-
-func (pld *packageListDir) Read() ([]fs.Dirent, error) {
-	if pld.fs.static != nil && pld.fs.static.HasName(pld.packageName) {
-		versions := pld.fs.static.ListVersions(pld.packageName)
-		dirents := make([]fs.Dirent, len(versions))
-		for i := range versions {
-			dirents[i] = dirDirEnt(versions[i])
-		}
-		return dirents, nil
-	}
-
-	var dirents []fs.Dirent
-	pkgs := pld.fs.index.List()
-	for _, p := range pkgs {
-		if p.Name == pld.packageName {
-			dirents = append(dirents, dirDirEnt(p.Version))
-		}
-	}
-
-	return dirents, nil
-}
-
-func (pld *packageListDir) Stat() (int64, time.Time, time.Time, error) {
-	// TODO(raggi): stat the index directory and pass on info
-	return 0, pld.fs.mountTime, pld.fs.mountTime, nil
-}
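For completeness, this is the client surface that the log message above calls deprecated in favor of the package resolver: /pkgfs/packages/<name>/<variant>/..., with non-base packages hidden unless allowlisted. A tiny sketch of reading through it (the mount point and the example arguments are illustrative; "static-package" is the name used by the tests below):

package example

import (
	"io/ioutil"
	"path/filepath"
)

// readFromPackage reads a file out of a base package via the deprecated
// /pkgfs/packages tree served by packagesRoot and packageListDir above, e.g.
// readFromPackage("static-package", "0", "meta/package"). Non-base packages
// are only visible here if they are on the allowlist.
func readFromPackage(name, variant, path string) ([]byte, error) {
	return ioutil.ReadFile(filepath.Join("/pkgfs/packages", name, variant, path))
}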
diff --git a/src/sys/pkg/bin/pkgfs/pkgfs/pkgfs.go b/src/sys/pkg/bin/pkgfs/pkgfs/pkgfs.go
deleted file mode 100644
index 7961ab3..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgfs/pkgfs.go
+++ /dev/null
@@ -1,484 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !build_with_native_toolchain
-
-// Package pkgfs hosts a filesystem for interacting with packages that are
-// stored on a host. It presents a tree of packages that are locally available
-// and a tree that enables a user to add new packages and/or package content to
-// the host.
-package pkgfs
-
-import (
-	"fmt"
-	"io"
-	"log"
-	"os"
-	"path"
-	"path/filepath"
-	"runtime"
-	"sync"
-	"syscall/zx"
-	"syscall/zx/fdio"
-	"time"
-
-	"go.fuchsia.dev/fuchsia/src/lib/thinfs/fs"
-	"go.fuchsia.dev/fuchsia/src/lib/thinfs/zircon/rpc"
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pkgfs/allowlist"
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pkgfs/blobfs"
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pkgfs/index"
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pm/pkg"
-)
-
-// Filesystem is the top level container for a pkgfs server
-type Filesystem struct {
-	root                                    *rootDirectory
-	static                                  *index.StaticIndex
-	index                                   *index.DynamicIndex
-	blobfs                                  *blobfs.Manager
-	mountInfo                               mountInfo
-	mountTime                               time.Time
-	allowedNonStaticPackages                *allowlist.Allowlist
-	enforceNonBaseExecutabilityRestrictions bool
-}
-
-// New initializes a new pkgfs filesystem server
-func New(blobDir *fdio.Directory, enforcePkgfsPackagesNonStaticAllowlist bool, enforceNonBaseExecutabilityRestrictions bool) (*Filesystem, error) {
-	bm, err := blobfs.New(blobDir)
-	if err != nil {
-		return nil, fmt.Errorf("pkgfs: open blobfs: %s", err)
-	}
-
-	static := index.NewStatic()
-	f := &Filesystem{
-		static: static,
-		index:  index.NewDynamic(static),
-		blobfs: bm,
-		mountInfo: mountInfo{
-			parentFd: -1,
-		},
-		enforceNonBaseExecutabilityRestrictions: enforceNonBaseExecutabilityRestrictions,
-	}
-
-	f.root = &rootDirectory{
-		unsupportedDirectory: unsupportedDirectory("/"),
-		fs:                   f,
-
-		dirs: map[string]fs.Directory{
-			"ctl": &ctlDirectory{
-				unsupportedDirectory: unsupportedDirectory("/ctl"),
-				fs:                   f,
-				dirs: map[string]fs.Directory{
-					"do-not-use-this-garbage": unsupportedDirectory("/ctl/do-not-use-this-garbage"),
-					"validation": &validationDir{
-						unsupportedDirectory: unsupportedDirectory("/ctl/validation"),
-						fs:                   f,
-					},
-				},
-			},
-			"install": &installDir{
-				unsupportedDirectory: unsupportedDirectory("/install"),
-				fs:                   f,
-			},
-			"needs": &needsRoot{
-				unsupportedDirectory: unsupportedDirectory("/needs"),
-				fs:                   f,
-			},
-			"packages": &packagesRoot{
-				unsupportedDirectory:      unsupportedDirectory("/packages"),
-				fs:                        f,
-				enforceNonStaticAllowlist: enforcePkgfsPackagesNonStaticAllowlist,
-			},
-			"versions": &versionsDirectory{
-				unsupportedDirectory: unsupportedDirectory("/versions"),
-				fs:                   f,
-			},
-			"system": unsupportedDirectory("/system"),
-		},
-	}
-
-	return f, nil
-}
-
-// staticIndexPath is the path inside the system package directory that contains the static packages for that system version.
-const staticIndexPath = "data/static_packages"
-const cacheIndexPath = "data/cache_packages"
-const disableExecutabilityEnforcementPath = "data/pkgfs_disable_executability_restrictions"
-const packagesAllowlistPath = "data/pkgfs_packages_non_static_packages_allowlist.txt"
-
-// loadStaticIndex loads the blob specified by root from blobfs into the given
-// static index. If an error is returned, it indicates a problem reading the
-// index content from disk, and the index may therefore be left empty.
-func loadStaticIndex(static *index.StaticIndex, blobfs *blobfs.Manager, root string, systemImage pkg.Package, systemImageMerkleroot string) error {
-	indexFile, err := blobfs.Open(root)
-	if err != nil {
-		return fmt.Errorf("pkgfs: could not load static index from blob %s: %s", root, err)
-	}
-	defer indexFile.Close()
-
-	return static.LoadFrom(indexFile, systemImage, systemImageMerkleroot)
-}
-
-func (f *Filesystem) loadCacheIndex(root string) error {
-	log.Println("pkgfs: loading cache index")
-	start := time.Now()
-	indexFile, err := f.blobfs.Open(root)
-	if err != nil {
-		return fmt.Errorf("pkgfs: could not load cache index from blob %s: %s", root, err)
-	}
-	defer indexFile.Close()
-
-	entries, err := index.ParseIndexFile(indexFile)
-	if err != nil {
-		return fmt.Errorf("pkgfs: error parsing cache index: %v", err)
-	}
-
-	var foundCount int
-	for _, entry := range entries {
-		meta, err := f.blobfs.Open(entry.Merkle)
-		if err != nil {
-			// Package meta.far is missing, skip it.
-			continue
-		}
-		meta.Close()
-		if err := importPackage(f, entry.Merkle); err != nil && err != fs.ErrAlreadyExists {
-			// This probably shouldn't happen if the meta far is present already.
-			log.Printf("pkgfs: surprising error loading optional pkg %q: %v", entry.Key, err)
-		}
-		if f.index.IsInstalling(entry.Merkle) {
-			// Some content blobs are missing.
-			// Mark failed so we don't list the package in /pkgfs/needs/packages
-			f.index.InstallingFailedForPackage(entry.Merkle)
-		}
-		if _, found := f.index.GetRoot(entry.Merkle); found {
-			foundCount++
-		}
-	}
-	log.Printf("pkgfs: cache index loaded in %.3fs; found %d/%d packages",
-		time.Since(start).Seconds(), foundCount, len(entries))
-	return nil
-}
-
-// Read the /pkgfs/packages allowlist for which non-static packages it should return,
-// and set the allowlist in the packages directory.
-func (f *Filesystem) retrievePackagesAllowlist(pd *packageDir) {
-	log.Println("pkgfs: loading /pkgfs/packages non static allowlist")
-	blob, ok := pd.getBlobFor(packagesAllowlistPath)
-	if !ok {
-		log.Printf("pkgfs: couldn't get a blob for /pkgfs/packages allowlist path %v", packagesAllowlistPath)
-		return
-	}
-
-	if err := f.loadAllowedNonStaticPackages(blob); err != nil {
-		log.Printf("pkgfs: could not load pkgfs/packages allowlist: %v", err)
-		return
-	}
-}
-
-func (f *Filesystem) loadAllowedNonStaticPackages(blob string) error {
-	allowListFile, err := f.blobfs.Open(blob)
-	if err != nil {
-		return fmt.Errorf("could not load non_static_pkgs allowlist from blob %s: %v", blob, err)
-	}
-	defer allowListFile.Close()
-
-	allowList, err := allowlist.LoadFrom(allowListFile)
-	if err != nil {
-		return err
-	}
-
-	log.Printf("pkgfs: parsed non-static pkgs allowlist: %v", allowList)
-	f.allowedNonStaticPackages = allowList
-	return nil
-}
-
-// SetSystemRoot sets/updates the merkleroot (and static index) that backs the /system partition and static package index.
-func (f *Filesystem) SetSystemRoot(merkleroot string) error {
-	pd, err := newPackageDirFromBlob(merkleroot, f, true)
-	if err != nil {
-		return err
-	}
-	f.root.setDir("system", pd)
-
-	blob, ok := pd.getBlobFor(staticIndexPath)
-	if !ok {
-		return fmt.Errorf("pkgfs: new system root set, but new static index %q not found in %q", staticIndexPath, merkleroot)
-	}
-
-	err = loadStaticIndex(f.static, f.blobfs, blob, pkg.Package{
-		Name:    "system_image",
-		Version: "0",
-	}, merkleroot)
-	if err != nil {
-		return err
-	}
-
-	blob, ok = pd.getBlobFor(cacheIndexPath)
-	if ok {
-		err := f.loadCacheIndex(blob)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Using our root package, retrieve the packages allowlist, if it exists,
-	// and pass it off to the packages root directory.
-	f.retrievePackagesAllowlist(pd)
-
-	// If the marker blob is known (even if it doesn't exist in blobfs),
-	// silently disable enforcement of executability restrictions.
-	if _, ok := pd.getBlobFor(disableExecutabilityEnforcementPath); ok {
-		f.enforceNonBaseExecutabilityRestrictions = false
-	}
-
-	return nil
-}
-
-// shouldAllowExecutableOpenByPackageName determines if a package should be
-// allowed to open blobs as executable. Contents of a package can be executed
-// if it is in the static index (without any runtime changes) or if it is in
-// the allowlist.
-func (f *Filesystem) shouldAllowExecutableOpenByPackageName(packageName string) bool {
-	inStaticIndex := f.static != nil && f.static.HasStaticName(packageName)
-	inAllowList := f.allowedNonStaticPackages != nil && f.allowedNonStaticPackages.Contains(packageName)
-
-	return inStaticIndex || inAllowList
-}
-
-// shouldAllowExecutableOpenByPackageRoot determines if a package should be
-// allowed to open blobs as executable. Contents of a package can be executed
-// if it is in the static index (without any runtime changes) or if it is in
-// the allowlist.
-func (f *Filesystem) shouldAllowExecutableOpenByPackageRoot(packageRoot string, packageName string) bool {
-	inStaticIndex := f.static != nil && f.static.HasStaticRoot(packageRoot)
-	inAllowList := f.allowedNonStaticPackages != nil && f.allowedNonStaticPackages.Contains(packageName)
-
-	return inStaticIndex || inAllowList
-}
-
-func (f *Filesystem) Blockcount() int64 {
-	// TODO(raggi): sum up all packages?
-	// TODO(raggi): delegate to blobfs?
-	return 0
-}
-
-func (f *Filesystem) Blocksize() int64 {
-	// TODO(raggi): sum up all packages?
-	// TODO(raggi): delegate to blobfs?
-	return 0
-}
-
-func (f *Filesystem) Size() int64 {
-	// TODO(raggi): delegate to blobfs?
-	return 0
-}
-
-func (f *Filesystem) Close() error {
-	return nil
-}
-
-func (f *Filesystem) RootDirectory() fs.Directory {
-	return f.root
-}
-
-func (f *Filesystem) Type() string {
-	return "pkgfs"
-}
-
-func (f *Filesystem) FreeSize() int64 {
-	return 0
-}
-
-func (f *Filesystem) DevicePath() string {
-	return ""
-}
-
-// Serve starts a Directory protocol RPC server on the given channel.
-func (f *Filesystem) Serve(c zx.Channel) error {
-	// rpc.NewServer takes ownership of the Handle and will close it on error.
-	_, err := rpc.NewServer(f, c)
-	if err != nil {
-		return fmt.Errorf("vfs server creation: %s", err)
-	}
-	f.mountInfo.serveChannel = c
-
-	return nil
-}
-
-var _ fs.FileSystem = (*Filesystem)(nil)
-
-// clean canonicalizes a path and returns a path that is relative to an assumed root.
-// As a result of this cleaning operation, an open of '/', '.', or '' all return ''.
-// TODO(raggi): speed this up/reduce allocation overhead.
-func clean(path string) string {
-	return filepath.Clean("/" + path)[1:]
-}
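A few worked results of this normalization, consistent with the comment above:

clean("")           == ""
clean("/")          == ""
clean(".")          == ""
clean("/foo//bar/") == "foo/bar"
clean("foo/../bar") == "bar"
clean("../../etc")  == "etc"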
-
-type mountInfo struct {
-	unmountOnce  sync.Once
-	serveChannel zx.Channel
-	parentFd     int
-}
-
-func goErrToFSErr(err error) error {
-	switch err {
-	case nil:
-		return nil
-	// Explicitly catch and pass through any error coming from the fs package.
-	case fs.ErrInvalidArgs, fs.ErrNotFound, fs.ErrAlreadyExists,
-		fs.ErrPermission, fs.ErrReadOnly, fs.ErrNoSpace,
-		fs.ErrFailedPrecondition, fs.ErrNotEmpty, fs.ErrNotOpen, fs.ErrNotAFile,
-		fs.ErrNotADir, fs.ErrIsActive, fs.ErrUnmounted, fs.ErrEOF,
-		fs.ErrNotSupported:
-		return err
-	case os.ErrInvalid:
-		return fs.ErrInvalidArgs
-	case os.ErrPermission:
-		return fs.ErrPermission
-	case os.ErrExist:
-		return fs.ErrAlreadyExists
-	case os.ErrNotExist:
-		return fs.ErrNotFound
-	case os.ErrClosed, io.ErrClosedPipe:
-		return fs.ErrNotOpen
-	case io.EOF, io.ErrUnexpectedEOF:
-		return fs.ErrEOF
-	}
-
-	switch e := err.(type) {
-	case *os.PathError:
-		return goErrToFSErr(e.Err)
-	case *zx.Error:
-		return e
-	}
-
-	log.Printf("unmapped fs error type: %#v", err)
-	return &zx.Error{Status: zx.ErrInternal, Text: err.Error()}
-}
-
-// GC examines the static and dynamic indexes, collects all the blobs that
-// belong to packages in these indexes. It then reads blobfs for its entire
-// list of blobs. Anything in blobfs that does not appear in the indexes is
-// removed.
-func (fs *Filesystem) GC() error {
-	log.Println("GC: start")
-	start := time.Now()
-	defer func() {
-		// this process produces a lot of garbage, so try to free that up (removes
-		// ~1.5mb of heap from a common (small) build target).
-		runtime.GC()
-		log.Printf("GC: completed in %.3fs", time.Since(start).Seconds())
-	}()
-
-	// Read the list of installed blobs first: there may be installations in
-	// progress, and to avoid creating orphans later this list must be equal to
-	// or a subset of the set we intersect.
-	installedBlobNames, err := fs.blobfs.Blobs()
-	if err != nil {
-		return fmt.Errorf("GC: unable to list blobfs: %s", err)
-	}
-	installedBlobs := make(map[string]struct{}, len(installedBlobNames))
-	for _, name := range installedBlobNames {
-		installedBlobs[name] = struct{}{}
-	}
-	log.Printf("GC: %d blobs in blobfs", len(installedBlobs))
-
-	allPackageBlobs := fs.index.AllPackageBlobs()
-	// access the meta FAR blob of the system package
-	if pd, ok := fs.root.dir("system").(*packageDir); ok {
-		allPackageBlobs = append(allPackageBlobs, pd.contents["meta"].blobId)
-	} else {
-		return fmt.Errorf("GC: gc aborted, system directory is of unknown type")
-	}
-
-	// Walk the list of all packages and collate all involved blobs, both the fars
-	// themselves, and the contents on which they depend.
-	allBlobs := make(map[string]struct{})
-	for _, pkgRoot := range allPackageBlobs {
-		allBlobs[pkgRoot] = struct{}{}
-
-		pDir, err := newPackageDirFromBlob(pkgRoot, fs, false)
-		if err != nil {
-			log.Printf("GC: failed getting package from blob %s: %s", pkgRoot, err)
-			continue
-		}
-
-		for _, m := range pDir.Blobs() {
-			allBlobs[m] = struct{}{}
-		}
-		pDir.Close()
-	}
-
-	log.Printf("GC: %d blobs referenced by %d packages", len(allBlobs), len(allPackageBlobs))
-
-	for m := range allBlobs {
-		delete(installedBlobs, m)
-	}
-
-	// remove all the blobs we no longer need
-	log.Printf("GC: removing %d blobs from blobfs", len(installedBlobs))
-
-	i := 0
-	for m := range installedBlobs {
-		i += 1
-		e := os.Remove(path.Join("/blob", m))
-		if e != nil {
-			log.Printf("GC: error removing %s from blobfs: %s", m, e)
-		}
-		if i%100 == 0 {
-			log.Printf("GC: deleted %d of %d blobs in %.3fs", i, len(installedBlobs), time.Since(start).Seconds())
-		}
-	}
-	return nil
-}
-
-// ValidateStaticIndex compares the contents of the static index against what
-// blobs are available in blobfs. It returns the ids of the blobs that are
-// present and those that are missing or any error encountered trying to do the
-// validation.
-func (fs *Filesystem) ValidateStaticIndex() (map[string]struct{}, map[string]struct{}, error) {
-	installedBlobNames, err := fs.blobfs.Blobs()
-	if err != nil {
-		return nil, nil, fmt.Errorf("pmd_validate: unable to list blobfs: %s", err)
-	}
-	installedBlobs := make(map[string]struct{}, len(installedBlobNames))
-	for _, name := range installedBlobNames {
-		installedBlobs[name] = struct{}{}
-	}
-
-	present := make(map[string]struct{})
-	missing := make(map[string]struct{})
-	staticPkgs := fs.static.StaticPackageBlobs()
-	if pd, ok := fs.root.dir("system").(*packageDir); ok {
-		staticPkgs = append(staticPkgs, pd.contents["meta"].blobId)
-	}
-
-	for _, pkgRoot := range staticPkgs {
-		if _, ok := installedBlobs[pkgRoot]; ok {
-			present[pkgRoot] = struct{}{}
-		} else {
-			log.Printf("pmd_validate: %q root is missing", pkgRoot)
-			missing[pkgRoot] = struct{}{}
-		}
-
-		pDir, err := newPackageDirFromBlob(pkgRoot, fs, false)
-		if err != nil {
-			log.Printf("pmd_validate: failed getting package from blob %s: %s", pkgRoot, err)
-			pDir.Close()
-			return nil, nil, err
-		}
-
-		for _, m := range pDir.Blobs() {
-			if _, ok := installedBlobs[m]; ok {
-				present[m] = struct{}{}
-			} else {
-				log.Printf("pmd_validate: %q is missing from %q", m, pkgRoot)
-				missing[m] = struct{}{}
-			}
-		}
-		pDir.Close()
-	}
-	return present, missing, nil
-}
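Taken together, the bring-up that pkgsvr performed with this package was: construct the filesystem over a blobfs directory handle, point it at the system_image meta.far (which seeds the static index, cache index, and allowlists), and serve the directory protocol on a channel. A condensed sketch, with error handling and flag plumbing elided; the boolean arguments and the source of systemImageMerkle (passed to pkgsvr on its command line) are illustrative. It is similar to the test harness in pkgfs_test.go below, which seeds the static index directly instead of calling SetSystemRoot.

package example

import (
	"fmt"
	"syscall/zx"
	"syscall/zx/fdio"

	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pkgfs/pkgfs"
)

// serve brings up a pkgfs instance over the given blobfs directory handle and
// serves it on c.
func serve(blobDir *fdio.Directory, systemImageMerkle string, c zx.Channel) error {
	fs, err := pkgfs.New(blobDir, false /* enforce packages allowlist */, true /* enforce executability restrictions */)
	if err != nil {
		return fmt.Errorf("pkgfs: new: %s", err)
	}
	// SetSystemRoot mounts /pkgfs/system and loads the static index, cache
	// index, and allowlists from the system_image package.
	if err := fs.SetSystemRoot(systemImageMerkle); err != nil {
		return fmt.Errorf("pkgfs: set system root: %s", err)
	}
	return fs.Serve(c)
}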
diff --git a/src/sys/pkg/bin/pkgfs/pkgfs/pkgfs_test.go b/src/sys/pkg/bin/pkgfs/pkgfs/pkgfs_test.go
deleted file mode 100644
index 54ddfd9..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgfs/pkgfs_test.go
+++ /dev/null
@@ -1,885 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//go:build fuchsia && !build_with_native_toolchain
-// +build fuchsia,!build_with_native_toolchain
-
-package pkgfs
-
-import (
-	"bytes"
-	"context"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-	"runtime"
-	"sort"
-	"strings"
-	"syscall"
-	"syscall/zx"
-	"syscall/zx/fdio"
-	zxio "syscall/zx/io"
-	"testing"
-
-	fidlio "fidl/fuchsia/io"
-
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pkgfs/iou"
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pkgfs/ramdisk"
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pm/build"
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pm/pkg"
-)
-
-// Adding a file to /in writes the file to blobfs
-// Adding a file that is a meta.far to /in creates the package in the package filesystem
-// If not all of a package's contents are available, opening the package directory should fail
-// A package directory should contain all files from meta.far and those listed in meta/contents
-
-var (
-	pkgfsDir *fdio.Directory
-	blobDir  *ramdisk.Ramdisk
-)
-
-func installTestPackage(installJustMetaFar bool) string {
-	cfg := build.TestConfig()
-	defer os.RemoveAll(filepath.Dir(cfg.TempDir))
-	build.TestPackage(cfg)
-	build.BuildTestPackage(cfg)
-
-	bi, err := cfg.BlobInfo()
-	if err != nil {
-		panic(fmt.Errorf("Creating BlobInfo: %s", err))
-	}
-
-	// Install the blobs to blobfs directly.
-	for _, b := range bi {
-		src, err := os.Open(b.SourcePath)
-		if err != nil {
-			panic(err)
-		}
-		dst, err := blobDir.Open(b.Merkle.String(), os.O_WRONLY|os.O_CREATE, 0o700)
-		if err != nil {
-			panic(fmt.Errorf("Opening blob dst: %s", err))
-		}
-		if err := dst.Truncate(int64(b.Size)); err != nil {
-			panic(err)
-		}
-		if _, err = io.Copy(dst, src); err != nil {
-			panic(err)
-		}
-		if err := src.Close(); err != nil {
-			panic(err)
-		}
-		if err := dst.Close(); err != nil {
-			panic(err)
-		}
-		if installJustMetaFar {
-			return bi[0].Merkle.String()
-		}
-	}
-
-	return bi[0].Merkle.String()
-}
-
-var testPackageMerkle string
-
-// tmain exists for the defer convenience, so that defers are run before os.Exit gets called.
-func tmain(m *testing.M) int {
-	log.SetFlags(log.Lshortfile)
-
-	var err error
-	if blobDir, err = ramdisk.New(10 * 1024 * 1024); err != nil {
-		panic(fmt.Errorf("Creating blobfs ramdisk: %s", err))
-	}
-	if err := blobDir.StartBlobfs(); err != nil {
-		panic(fmt.Errorf("Starting blobfs: %s", err))
-	}
-	defer blobDir.Destroy()
-
-	testPackageMerkle = installTestPackage(false)
-	systemImageMerkle := installTestPackage(true)
-
-	d, err := ioutil.TempDir("", "pkgfs-test-mount")
-	if err != nil {
-		panic(err)
-	}
-	defer os.RemoveAll(d)
-
-	blobd, err := blobDir.Open(".", os.O_RDWR|syscall.O_DIRECTORY, 0o700)
-	if err != nil {
-		panic(err)
-	}
-	defer func() {
-		// The Go syscall API doesn't provide any way to detach the underlying
-		// channel from the *File wrapper, so once the GC runs, then blobd will be
-		// closed and then pkgfs can't access the blobfs anymore, so we have to keep
-		// it alive for at least the runtime of the tests.
-		runtime.KeepAlive(blobd)
-	}()
-
-	pkgfs, err := New(syscall.FDIOForFD(int(blobd.Fd())).(*fdio.Directory), false, false)
-	if err != nil {
-		panic(err)
-	}
-	systemImagePackage := pkg.Package{
-		Name:    "system_image",
-		Version: "0",
-	}
-	pkgfs.static.LoadFrom(strings.NewReader(
-		fmt.Sprintf("static-package/0=%s\n", testPackageMerkle)), systemImagePackage, systemImageMerkle)
-
-	nc, sc, err := zx.NewChannel(0)
-	if err != nil {
-		panic(err)
-	}
-
-	pkgfsDir = fdio.NewDirectoryWithCtx(&zxio.DirectoryWithCtxInterface{Channel: nc})
-	if err = pkgfs.Serve(sc); err != nil {
-		panic(err)
-	}
-	return m.Run()
-}
-
-func TestMain(m *testing.M) {
-	os.Exit(tmain(m))
-}
-
-func TestAddPackage(t *testing.T) {
-	cfg := build.TestConfig()
-	defer os.RemoveAll(filepath.Dir(cfg.TempDir))
-
-	cfg.PkgName = "test-add-package"
-
-	build.BuildTestPackage(cfg)
-
-	bi, err := cfg.BlobInfo()
-	if err != nil {
-		t.Fatal(err)
-	}
-	merkleroot := bi[0].Merkle.String()
-
-	dst, err := iou.OpenFrom(pkgfsDir, filepath.Join("install/pkg", merkleroot), os.O_RDWR|os.O_CREATE, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err := dst.Truncate(int64(bi[0].Size)); err != nil {
-		t.Fatal(err)
-	}
-	src, err := os.Open(bi[0].SourcePath)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if _, err := io.Copy(dst, src); err != nil {
-		src.Close()
-		dst.Close()
-		t.Fatal(err)
-	}
-	if err := src.Close(); err != nil {
-		t.Fatal(err)
-	}
-	if err := dst.Close(); err != nil {
-		t.Fatal(err)
-	}
-
-	// Opening it again gives EEXIST
-	_, err = iou.OpenFrom(pkgfsDir, filepath.Join("install/pkg", merkleroot), os.O_RDWR|os.O_CREATE, 0o700)
-	if !os.IsExist(err) {
-		t.Fatal(err)
-	}
-
-	d, err := blobDir.Open(merkleroot, syscall.O_PATH, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	_, err = d.Stat()
-	d.Close()
-	if err != nil {
-		t.Fatalf("package blob missing after package write: %s", err)
-	}
-
-	f, err := iou.OpenFrom(pkgfsDir, filepath.Join("packages", cfg.PkgName, cfg.PkgVersion), os.O_RDONLY|syscall.O_DIRECTORY, 0o700)
-	if err == nil {
-		f.Close()
-		t.Error("package appeared in the pkgfs package tree before needs fulfilled")
-	}
-
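-	// Determine which of the package's content blobs are not yet in blobfs;
-	// these should be listed under needs/packages/<merkleroot>.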
-	expectedNeeds := []string{}
-	for _, b := range bi {
-		if _, err := blobDir.Open(b.Merkle.String(), syscall.O_PATH, 0o700); os.IsNotExist(err) {
-			expectedNeeds = append(expectedNeeds, b.Merkle.String())
-		}
-	}
-	sort.Strings(expectedNeeds)
-
-	f, err = iou.OpenFrom(pkgfsDir, filepath.Join("needs", "packages", merkleroot), os.O_RDONLY|syscall.O_DIRECTORY, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	needsPkgs, err := f.Readdirnames(256)
-	if err != nil {
-		t.Error(err)
-	}
-	if err := f.Close(); err != nil {
-		t.Fatal(err)
-	}
-
-	if got, want := len(needsPkgs), len(expectedNeeds); got != want {
-		t.Errorf("needs/packages/{root}/* count: got %d, want %d", got, want)
-	}
-	sort.Strings(needsPkgs)
-	for i := range expectedNeeds {
-		if got, want := filepath.Base(needsPkgs[i]), expectedNeeds[i]; got != want {
-			t.Errorf("needs/packages/{root}/{file} got %q, want %q", got, want)
-		}
-	}
-
-	// install the blobs of the package
-	for _, b := range bi[1:] {
-		root := b.Merkle.String()
-		idx := sort.SearchStrings(needsPkgs, root)
-		if idx == len(needsPkgs) {
-			continue
-		}
-
-		dst, err := iou.OpenFrom(pkgfsDir, filepath.Join("install/blob", root), os.O_RDWR|os.O_CREATE, 0o700)
-		if os.IsExist(err) {
-			continue
-		}
-		if err != nil {
-			t.Fatal(err)
-		}
-		if err := dst.Truncate(int64(b.Size)); err != nil {
-			t.Fatal(err)
-		}
-		src, err := os.Open(b.SourcePath)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if _, err = io.Copy(dst, src); err != nil {
-			t.Fatal(err)
-		}
-		if err := src.Close(); err != nil {
-			t.Fatal(err)
-		}
-		if err := dst.Close(); err != nil {
-			t.Fatal(err)
-		}
-	}
-
-	var info os.FileInfo
-	if info, err = pkgfsStat(filepath.Join("packages", cfg.PkgName)); err != nil {
-		t.Fatalf("package did not appear in the pkgfs package tree: %s", err)
-	}
-	if !info.IsDir() {
-		t.Errorf("os.Stat on package directory says it's not a directory")
-	}
-	if info, err = pkgfsStat(filepath.Join("packages", cfg.PkgName, cfg.PkgVersion)); err != nil {
-		t.Fatalf("package version did not appear in the pkgfs package tree: %s", err)
-	}
-	if !info.IsDir() {
-		t.Errorf("os.Stat on package version directory says it's not a directory")
-	}
-
-	for _, b := range bi[1:] {
-		got, err := pkgfsReadFile(filepath.Join("packages", cfg.PkgName, cfg.PkgVersion, b.Path))
-		if err != nil {
-			t.Fatal(err)
-		}
-		want, err := ioutil.ReadFile(b.SourcePath)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if !bytes.Equal(got, want) {
-			t.Errorf("got %x, want %x", got, want)
-		}
-	}
-
-	// assert that the dynamically added package appears in /versions
-	metaMerkle, err := pkgfsReadFile(filepath.Join("versions", merkleroot, "meta"))
-	if err != nil {
-		t.Fatal(err)
-	}
-	if got, want := string(metaMerkle), merkleroot; got != want {
-		t.Errorf("add dynamic package, bad version: got %q, want %q", got, want)
-	}
-}
-
-func pkgfsReadFile(path string) ([]byte, error) {
-	f, err := iou.OpenFrom(pkgfsDir, path, os.O_RDONLY, 0o700)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	buf := bytes.Buffer{}
-	if _, err := io.Copy(&buf, f); err != io.EOF && err != nil {
-		return nil, err
-	}
-	return buf.Bytes(), nil
-}
-
-func pkgfsStat(path string) (os.FileInfo, error) {
-	f, err := iou.OpenFrom(pkgfsDir, path, os.O_RDONLY|syscall.O_PATH, 0o700)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	return f.Stat()
-}
-
-func TestMetaFarRootDuality(t *testing.T) {
-	path := "packages/static-package/0/meta"
-
-	t.Run("meta is a file containing the merkleroot", func(t *testing.T) {
-		f, err := iou.OpenFrom(pkgfsDir, path, 0, 0o700)
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer f.Close()
-		b, err := ioutil.ReadAll(f)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if len(b) != 64 || string(b) != testPackageMerkle {
-			t.Fatalf("expected 64 byte merkleroot of %q, got %q", testPackageMerkle, string(b))
-		}
-	})
-
-	t.Run("meta is a directory containing files", func(t *testing.T) {
-		f, err := iou.OpenFrom(pkgfsDir, path, syscall.O_DIRECTORY, 0o700)
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer f.Close()
-		list, err := f.Readdirnames(-1)
-		if err != nil {
-			t.Fatal(err)
-		}
-		found := false
-		for _, item := range list {
-			if item == "contents" {
-				found = true
-				break
-			}
-		}
-		if !found {
-			t.Fatalf("did not find 'contents' file among meta/ readdir: %v", list)
-		}
-
-		contents, err := iou.OpenFrom(pkgfsDir, filepath.Join(path, "contents"), 0, 0o700)
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer contents.Close()
-		fi, err := contents.Stat()
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		offset, err := contents.Seek(17, io.SeekStart)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if offset != 17 {
-			t.Fatalf("Tried to seek to 17 but got %d", offset)
-		}
-		offset, err = contents.Seek(-7, io.SeekCurrent)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if offset != 10 {
-			t.Fatalf("Tried to seek to 17-7 but got %d", offset)
-		}
-		offset, err = contents.Seek(0, io.SeekEnd)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if offset == 0 {
-			t.Fatalf("Tried to seek to end but got %d", offset)
-		}
-		if offset != fi.Size() {
-			t.Fatalf("Seek to end arrived at %d but expected %d size", offset, fi.Size())
-		}
-	})
-
-	t.Run("meta subdirectories are openable and listable", func(t *testing.T) {
-		f, err := iou.OpenFrom(pkgfsDir, "packages/static-package/0/meta/foo", 0, 0o700)
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer f.Close()
-		fi, err := f.Stat()
-		if err != nil {
-			t.Fatal(err)
-		}
-		if !fi.IsDir() {
-			t.Fatal("expected static-package/0/meta/foo to be a directory, not a file")
-		}
-		list, err := f.Readdirnames(-1)
-		if err != nil {
-			t.Fatal(err)
-		}
-		if len(list) != 1 || list[0] != "one" {
-			t.Fatalf("expected list to contain one file, got %v", list)
-		}
-	})
-
-	t.Run("meta subdirectories do not have file/directory duality", func(t *testing.T) {
-		// Protect against a regression of the name-prefix fixup in metafar.go:
-		// at the time this test was written, a "." open would open meta/
-		// instead.
-		d, err := pkgfsDir.Open("packages/static-package/0/meta/foo", syscall.O_DIRECTORY, 0o700)
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer d.Close()
-
-		f, err := iou.OpenFrom(d.(*fdio.Directory), "", syscall.O_RDONLY, 0o700)
-		if err != nil {
-			t.Fatal(err)
-		}
-		defer f.Close()
-		fi, err := f.Stat()
-		if err != nil {
-			t.Fatal(err)
-		}
-		if !fi.IsDir() {
-			t.Fatal("expected static-package/0/meta/foo to be a directory, not a file")
-		}
-	})
-
-}
-
-func TestExecutability(t *testing.T) {
-	// packages/static-package/0/meta/contents should not be openable as
-	// executable, because files under meta/ are never executable.
-	path := "packages/static-package/0/meta/contents"
-	f, err := pkgfsDir.Open(path, syscall.FsRightReadable|syscall.FsRightExecutable, 0o700)
-	if f != nil || err == nil {
-		t.Fatal(err)
-	}
-
-	// packages/static-package/0/a should be openable as executable, because
-	// files from packages are executable.
-	path = "packages/static-package/0/a"
-	f, err = pkgfsDir.Open(path, syscall.FsRightReadable|syscall.FsRightExecutable, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	f.Close()
-}
-
-func TestListContainsStatic(t *testing.T) {
-	f, err := iou.OpenFrom(pkgfsDir, "packages/static-package/0", os.O_RDONLY|syscall.O_DIRECTORY, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	names, err := f.Readdirnames(-1)
-	f.Close()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if len(names) <= 0 {
-		t.Errorf("static-package appears to be empty or missing")
-	}
-}
-
-func TestListRoot(t *testing.T) {
-	f, err := iou.OpenFrom(pkgfsDir, ".", os.O_RDONLY|syscall.O_DIRECTORY, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	names, err := f.Readdirnames(-1)
-	f.Close()
-	if err != nil {
-		t.Fatal(err)
-	}
-	want := []string{"install", "needs", "packages", "system", "versions", "ctl"}
-	sort.Strings(names)
-	sort.Strings(want)
-
-	if len(names) != len(want) {
-		t.Fatalf("got %v, want %v", names, want)
-	}
-
-	for i, name := range names {
-		got := filepath.Base(name)
-		if want := want[i]; got != want {
-			t.Errorf("got %q, want %q", got, want)
-		}
-	}
-
-}
-
-func TestListCtl(t *testing.T) {
-	f, err := iou.OpenFrom(pkgfsDir, "ctl", os.O_RDONLY|syscall.O_DIRECTORY, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	names, err := f.Readdirnames(-1)
-	f.Close()
-	if err != nil {
-		t.Fatal(err)
-	}
-	want := []string{"do-not-use-this-garbage", "validation"}
-	sort.Strings(names)
-	sort.Strings(want)
-
-	if len(names) != len(want) {
-		t.Fatalf("got %v, want %v", names, want)
-	}
-
-	for i, name := range names {
-		got := filepath.Base(name)
-		if want := want[i]; got != want {
-			t.Errorf("got %q, want %q", got, want)
-		}
-	}
-}
-
-func TestSync(t *testing.T) {
-	d, err := iou.OpenFrom(pkgfsDir, "ctl", os.O_RDONLY|syscall.O_DIRECTORY, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if err = d.Sync(); err != nil {
-		t.Fatal(err)
-	}
-	d.Close()
-}
-
-func TestMetaFileGetFlags(t *testing.T) {
-	path := "packages/static-package/0/meta/contents"
-	f, err := pkgfsDir.Open(path, syscall.FsRightReadable, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer f.Close()
-
-	fdioFile, ok := f.(*fdio.File)
-	if !ok {
-		t.Fatal("File is not an fdio.File")
-	}
-
-	ioFile := (*zxio.FileWithCtxInterface)(fdioFile.Node.NodeWithCtxInterface)
-
-	status, flags, err := ioFile.GetFlags(context.Background())
-	if err != nil || status != int32(zx.ErrOk) {
-		t.Fatal("Could not get flags:", err, status)
-	}
-
-	if flags != syscall.FsRightReadable {
-		t.Fatalf("got %v, want %v", flags, syscall.FsRightReadable)
-	}
-}
-
-func TestMapFileForRead(t *testing.T) {
-	path := "packages/static-package/0/meta/contents"
-	f, err := pkgfsDir.Open(path, syscall.FsRightReadable, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer f.Close()
-
-	fdioFile, ok := f.(*fdio.File)
-	if !ok {
-		t.Fatal("File is not an fdio.File")
-	}
-	defer fdioFile.Close()
-
-	flags := zxio.VmoFlagsRead
-	result, err := fdioFile.GetBackingMemory(flags)
-	if err != nil {
-		t.Fatalf("GetBackingMemory(): %s", err)
-	}
-	switch w := result.Which(); w {
-	case zxio.File2GetBackingMemoryResultErr:
-		t.Fatalf("GetBackingMemory: %s", zx.Status(result.Err))
-	case zxio.File2GetBackingMemoryResultResponse:
-		vmo := result.Response.Vmo
-		defer func() {
-			if err := vmo.Close(); err != nil {
-				t.Errorf("vmo.Close(): %s", err)
-			}
-		}()
-		var sizeBytes [8]byte
-		if err := vmo.Handle().GetProperty(zx.PropVmoContentSize, sizeBytes[:]); err != nil {
-			t.Fatalf("GetProperty(zx.PropVmoContentSize): %s", err)
-		}
-		size := binary.LittleEndian.Uint64(sizeBytes[:])
-		if want := uint64(347); size != want {
-			t.Fatalf("got size = %d, want %d", size, want)
-		}
-		buf := make([]byte, size)
-		offset := uint64(0)
-		if err := vmo.Read(buf, offset); err != nil {
-			t.Fatalf("vmo.Read(): %s", err)
-		}
-	default:
-		t.Fatalf("unknown variant %d", w)
-	}
-}
-
-func getKoid(h *zx.Handle) (uint64, error) {
-	info, err := h.GetInfoHandleBasic()
-	if err != nil {
-		return 0, err
-	}
-	return info.Koid, nil
-}
-
-func TestMapFileForReadPrivate(t *testing.T) {
-	path := "packages/static-package/0/meta/contents"
-	f, err := pkgfsDir.Open(path, syscall.FsRightReadable, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer f.Close()
-
-	fdioFile, ok := f.(*fdio.File)
-	if !ok {
-		t.Fatal("File is not an fdio.File")
-	}
-	defer fdioFile.Close()
-
-	flags := zxio.VmoFlagsRead | zxio.VmoFlagsPrivateClone
-
-	// We want to test that we're receiving our own clone each time we invoke
-	// GetBackingMemory() with VmoFlagsPrivateClone.
-	var firstVmo *zx.VMO
-	{
-		result, err := fdioFile.GetBackingMemory(flags)
-		if err != nil {
-			t.Fatalf("GetBackingMemory(): %s", err)
-		}
-		switch w := result.Which(); w {
-		case zxio.File2GetBackingMemoryResultErr:
-			t.Fatalf("GetBackingMemory: %s", zx.Status(result.Err))
-		case zxio.File2GetBackingMemoryResultResponse:
-			vmo := result.Response.Vmo
-			defer func() {
-				if err := vmo.Close(); err != nil {
-					t.Errorf("vmo.Close(): %s", err)
-				}
-			}()
-			firstVmo = &vmo
-		default:
-			t.Fatalf("unhandled variant: %d", w)
-		}
-	}
-
-	var secondVmo *zx.VMO
-	{
-		result, err := fdioFile.GetBackingMemory(flags)
-		if err != nil {
-			t.Fatalf("GetBackingMemory(): %s", err)
-		}
-		switch w := result.Which(); w {
-		case zxio.File2GetBackingMemoryResultErr:
-			t.Fatalf("GetBackingMemory: %s", zx.Status(result.Err))
-		case zxio.File2GetBackingMemoryResultResponse:
-			vmo := result.Response.Vmo
-			defer func() {
-				if err := vmo.Close(); err != nil {
-					t.Errorf("vmo.Close(): %s", err)
-				}
-			}()
-			secondVmo = &vmo
-		default:
-			t.Fatalf("unhandled variant: %d", w)
-		}
-	}
-
-	firstKoid, err := getKoid(firstVmo.Handle())
-	if err != nil {
-		t.Fatal("Could not retrieve koid of handle: ", err)
-	}
-	secondKoid, err := getKoid(secondVmo.Handle())
-	if err != nil {
-		t.Fatal("Could not retrieve koid of handle: ", err)
-	}
-	if firstKoid == secondKoid {
-		t.Fatal("Two GetBackingMemory calls with VmoFlagsPrivateClone produced handles to the same object")
-	}
-}
-
-func TestMapFileForReadExact(t *testing.T) {
-	path := "packages/static-package/0/meta/contents"
-	f, err := pkgfsDir.Open(path, syscall.FsRightReadable, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer f.Close()
-
-	fdioFile, ok := f.(*fdio.File)
-	if !ok {
-		t.Fatal("File is not an fdio.File")
-	}
-
-	// Exact flag is not supported in pkgfs
-	flags := zxio.VmoFlagsSharedBuffer
-	result, err := fdioFile.GetBackingMemory(flags)
-	if err != nil {
-		t.Fatalf("GetBackingMemory(): %s", err)
-	}
-	switch w := result.Which(); w {
-	case zxio.File2GetBackingMemoryResultErr:
-		if got, want := zx.Status(result.Err), zx.ErrNotSupported; got != want {
-			t.Fatalf("got GetBackingMemory() = %s, want %s", got, want)
-		}
-	case zxio.File2GetBackingMemoryResultResponse:
-		t.Fatal("Attempt to map with VmoFlagsSharedBuffer should fail")
-	default:
-		t.Fatalf("unhandled variant: %d", w)
-	}
-}
-
-func TestMapFilePrivateAndExact(t *testing.T) {
-	path := "packages/static-package/0/meta/contents"
-	f, err := pkgfsDir.Open(path, syscall.FsRightReadable, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer f.Close()
-
-	fdioFile, ok := f.(*fdio.File)
-	if !ok {
-		t.Fatal("File is not an fdio.File")
-	}
-
-	// This combination is invalid according to the fuchsia.io protocol definition.
-	flags := zxio.VmoFlagsPrivateClone | zxio.VmoFlagsSharedBuffer
-	result, err := fdioFile.GetBackingMemory(flags)
-	if err != nil {
-		t.Fatalf("GetBackingMemory(): %s", err)
-	}
-	switch w := result.Which(); w {
-	case zxio.File2GetBackingMemoryResultErr:
-		if got, want := zx.Status(result.Err), zx.ErrNotSupported; got != want {
-			t.Fatalf("got GetBackingMemory() = %s, want %s", got, want)
-		}
-	case zxio.File2GetBackingMemoryResultResponse:
-		t.Fatal("Attempt to specify VmoFlagsPrivateClone and VmoFlagsSharedBuffer should fail")
-	default:
-		t.Fatalf("unhandled variant: %d", w)
-	}
-}
-
-func TestMapFileForWrite(t *testing.T) {
-	path := "packages/static-package/0/meta/contents"
-	f, err := pkgfsDir.Open(path, syscall.FsRightReadable, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer f.Close()
-
-	fdioFile, ok := f.(*fdio.File)
-	if !ok {
-		t.Fatal("File is not an fdio.File")
-	}
-
-	// Files in a meta directory are read-only, so creating a writable mapping
-	// should fail.
-	flags := zxio.VmoFlagsWrite
-	result, err := fdioFile.GetBackingMemory(flags)
-	if err != nil {
-		t.Fatalf("GetBackingMemory(): %s", err)
-	}
-	switch w := result.Which(); w {
-	case zxio.File2GetBackingMemoryResultErr:
-		if got, want := zx.Status(result.Err), zx.ErrBadHandle; got != want {
-			t.Fatalf("got GetBackingMemory() = %s, want %s", got, want)
-		}
-	case zxio.File2GetBackingMemoryResultResponse:
-		t.Fatal("Attempt to get a writable buffer should fail")
-	default:
-		t.Fatalf("unhandled variant: %d", w)
-	}
-}
-
-func TestMapFileForExec(t *testing.T) {
-	path := "packages/static-package/0/meta/contents"
-	f, err := pkgfsDir.Open(path, syscall.FsRightReadable, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer f.Close()
-
-	fdioFile, ok := f.(*fdio.File)
-	if !ok {
-		t.Fatal("File is not an fdio.File")
-	}
-
-	flags := zxio.VmoFlagsExecute
-	result, err := fdioFile.GetBackingMemory(flags)
-	if err != nil {
-		t.Fatalf("GetBackingMemory(): %s", err)
-	}
-	switch w := result.Which(); w {
-	case zxio.File2GetBackingMemoryResultErr:
-		if got, want := zx.Status(result.Err), zx.ErrBadHandle; got != want {
-			t.Fatalf("got GetBackingMemory() = %s, want %s", got, want)
-		}
-	case zxio.File2GetBackingMemoryResultResponse:
-		t.Fatal("Attempt to get executable buffer should fail")
-	default:
-		t.Fatalf("unhandled variant: %d", w)
-	}
-}
-
-func TestTriggerGC(t *testing.T) {
-	// always perform the operation on a dedicated channel, so that pkgfsDir is not
-	// closed.
-	unlink := func(path string) error {
-		d, err := pkgfsDir.Open(".", uint32(fidlio.OpenFlagsDirectory|fidlio.OpenFlagsRightReadable|fidlio.OpenFlagsRightWritable), 0o700)
-		if err != nil {
-			return err
-		}
-		return d.Unlink(path)
-	}
-
-	// /pkgfs/do-not-use-this-garbage no longer exists
-	if err := unlink("do-not-use-this-garbage"); err == nil {
-		t.Fatal("expected error, got nil")
-	}
-
-	// unlinking do-not-use-this-garbage triggers a GC but doesn't remove the file.
-	if err := unlink("ctl/do-not-use-this-garbage"); err != nil {
-		t.Fatal(err)
-	}
-	if err := unlink("ctl/do-not-use-this-garbage"); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func TestVersions(t *testing.T) {
-	f, err := iou.OpenFrom(pkgfsDir, "versions", os.O_RDONLY|syscall.O_DIRECTORY, 0o700)
-	if err != nil {
-		t.Fatal(err)
-	}
-	names, err := f.Readdirnames(-1)
-	f.Close()
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(names) == 0 {
-		t.Fatal("observed no versions")
-	}
-
-	for _, name := range names {
-		if !merklePat.MatchString(filepath.Base(name)) {
-			t.Errorf("got non-merkle version: %q", name)
-			continue
-		}
-
-		b, err := pkgfsReadFile(filepath.Join("versions", name, "meta"))
-		if err != nil {
-			t.Fatal(err)
-		}
-		if got, want := string(b), filepath.Base(name); got != want {
-			t.Errorf("got %q, want %q", got, want)
-		}
-	}
-}
diff --git a/src/sys/pkg/bin/pkgfs/pkgfs/root_directory.go b/src/sys/pkg/bin/pkgfs/pkgfs/root_directory.go
deleted file mode 100644
index 3b507cb..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgfs/root_directory.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2018 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !build_with_native_toolchain
-
-package pkgfs
-
-import (
-	"strings"
-	"sync"
-	"time"
-
-	"go.fuchsia.dev/fuchsia/src/lib/thinfs/fs"
-)
-
-type rootDirectory struct {
-	unsupportedDirectory
-	fs   *Filesystem
-	mu   sync.RWMutex
-	dirs map[string]fs.Directory
-}
-
-func (d *rootDirectory) Lock() {
-	d.mu.Lock()
-}
-
-func (d *rootDirectory) Unlock() {
-	d.mu.Unlock()
-}
-
-func (d *rootDirectory) Dup() (fs.Directory, error) {
-	return d, nil
-}
-
-func (d *rootDirectory) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	name = clean(name)
-	if name == "" {
-		return nil, d, nil, nil
-	}
-
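-	// The first path segment selects a registered top-level directory; any
-	// remainder is delegated to that directory's Open.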
-	parts := strings.SplitN(name, "/", 2)
-
-	d.mu.RLock()
-	subdir, ok := d.dirs[parts[0]]
-	d.mu.RUnlock()
-	if !ok {
-		return nil, nil, nil, fs.ErrNotFound
-	}
-
-	if len(parts) == 1 {
-		return nil, subdir, nil, nil
-	}
-
-	return subdir.Open(parts[1], flags)
-}
-
-func (d *rootDirectory) Read() ([]fs.Dirent, error) {
-
-	d.mu.RLock()
-	dirs := make([]fs.Dirent, 0, len(d.dirs))
-	for n := range d.dirs {
-		dirs = append(dirs, dirDirEnt(n))
-	}
-	d.mu.RUnlock()
-	return dirs, nil
-}
-
-func (d *rootDirectory) Close() error {
-	return nil
-}
-
-func (d *rootDirectory) Stat() (int64, time.Time, time.Time, error) {
-	return 0, d.fs.mountTime, d.fs.mountTime, nil
-}
-
-// setDir sets the given path within the root directory to be served by the given fs.Directory
-func (d *rootDirectory) setDir(path string, newDir fs.Directory) {
-	d.mu.Lock()
-	defer d.mu.Unlock()
-
-	d.dirs[path] = newDir
-}
-
-func (d *rootDirectory) dir(path string) fs.Directory {
-	d.mu.RLock()
-	defer d.mu.RUnlock()
-
-	return d.dirLocked(path)
-}
-
-func (d *rootDirectory) dirLocked(path string) fs.Directory {
-	return d.dirs[path]
-}
diff --git a/src/sys/pkg/bin/pkgfs/pkgfs/unsupported_vnodes.go b/src/sys/pkg/bin/pkgfs/pkgfs/unsupported_vnodes.go
deleted file mode 100644
index 702d902..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgfs/unsupported_vnodes.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !build_with_native_toolchain
-
-package pkgfs
-
-import (
-	"log"
-	"time"
-
-	"go.fuchsia.dev/fuchsia/src/lib/thinfs/fs"
-)
-
-const (
-	fileType = "file"
-	dirType  = "dir"
-)
-
-func logUnsupportedOperation(node, nodeType, opName string) {
-	log.Printf("unsupported(%s): %s %s", string(node), nodeType, opName)
-}
-
-type unsupportedFile string
-
-// Export for testing.
-type UnsupportedFile = unsupportedFile
-
-func (f unsupportedFile) Close() error {
-	logUnsupportedOperation(string(f), fileType, "close")
-	return fs.ErrNotSupported
-}
-
-func (f unsupportedFile) Dup() (fs.File, error) {
-	logUnsupportedOperation(string(f), fileType, "dup")
-	return f, fs.ErrNotSupported
-}
-
-func (f unsupportedFile) Read(p []byte, off int64, whence int) (int, error) {
-	logUnsupportedOperation(string(f), fileType, "read")
-	return 0, fs.ErrNotSupported
-}
-
-func (f unsupportedFile) Seek(offset int64, whence int) (int64, error) {
-	logUnsupportedOperation(string(f), fileType, "seek")
-	return 0, fs.ErrNotSupported
-}
-
-func (f unsupportedFile) Stat() (int64, time.Time, time.Time, error) {
-	logUnsupportedOperation(string(f), fileType, "stat")
-	return 0, time.Now(), time.Now(), fs.ErrNotSupported
-}
-
-func (f unsupportedFile) Sync() error {
-	logUnsupportedOperation(string(f), fileType, "sync")
-	return fs.ErrNotSupported
-}
-
-func (f unsupportedFile) Tell() (int64, error) {
-	logUnsupportedOperation(string(f), fileType, "tell")
-	return 0, fs.ErrNotSupported
-}
-
-func (f unsupportedFile) Touch(lastAccess, lastModified time.Time) error {
-	logUnsupportedOperation(string(f), fileType, "touch")
-	return fs.ErrNotSupported
-}
-
-func (f unsupportedFile) Truncate(size uint64) error {
-	logUnsupportedOperation(string(f), fileType, "truncate")
-	return fs.ErrNotSupported
-}
-
-func (f unsupportedFile) Write(p []byte, off int64, whence int) (int, error) {
-	logUnsupportedOperation(string(f), fileType, "write")
-	return 0, fs.ErrNotSupported
-}
-
-func (f unsupportedFile) GetOpenFlags() fs.OpenFlags {
-	logUnsupportedOperation(string(f), fileType, "get_open_flags")
-	return 0
-}
-
-func (f unsupportedFile) SetOpenFlags(flags fs.OpenFlags) error {
-	logUnsupportedOperation(string(f), fileType, "set_open_flags")
-	return fs.ErrNotSupported
-}
-
-var _ = fs.File(unsupportedFile("impl-check"))
-
-type unsupportedDirectory string
-
-// Export for testing.
-type UnsupportedDirectory = unsupportedDirectory
-
-func (d unsupportedDirectory) Close() error {
-	logUnsupportedOperation(string(d), dirType, "close")
-	return fs.ErrNotSupported
-}
-
-func (d unsupportedDirectory) Dup() (fs.Directory, error) {
-	logUnsupportedOperation(string(d), dirType, "dup")
-	return nil, fs.ErrNotSupported
-}
-
-func (d unsupportedDirectory) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	logUnsupportedOperation(string(d), dirType, "open")
-	return nil, nil, nil, fs.ErrNotSupported
-}
-
-func (d unsupportedDirectory) Read() ([]fs.Dirent, error) {
-	logUnsupportedOperation(string(d), dirType, "read")
-	return nil, fs.ErrNotSupported
-}
-
-func (d unsupportedDirectory) Rename(dstparent fs.Directory, src, dst string) error {
-	logUnsupportedOperation(string(d), dirType, "rename")
-	return fs.ErrNotSupported
-}
-
-func (d unsupportedDirectory) Stat() (int64, time.Time, time.Time, error) {
-	logUnsupportedOperation(string(d), dirType, "stat")
-	return 0, time.Now(), time.Now(), fs.ErrNotSupported
-}
-
-func (d unsupportedDirectory) Sync() error {
-	logUnsupportedOperation(string(d), dirType, "sync")
-	return fs.ErrNotSupported
-}
-
-func (d unsupportedDirectory) Touch(lastAccess, lastModified time.Time) error {
-	logUnsupportedOperation(string(d), dirType, "touch")
-	return fs.ErrNotSupported
-}
-
-func (d unsupportedDirectory) Unlink(target string) error {
-	logUnsupportedOperation(string(d), dirType, "unlink")
-	return fs.ErrNotSupported
-}
-
-var _ = fs.Directory(unsupportedDirectory("impl-check"))
diff --git a/src/sys/pkg/bin/pkgfs/pkgfs/validation_directory.go b/src/sys/pkg/bin/pkgfs/pkgfs/validation_directory.go
deleted file mode 100644
index a30c0a1..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgfs/validation_directory.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2019 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !build_with_native_toolchain
-
-package pkgfs
-
-import (
-	"sort"
-	"time"
-
-	"go.fuchsia.dev/fuchsia/src/lib/thinfs/fs"
-)
-
-// validationDir has two entries, "present" and "missing", which list the blobs
-// from the static index that are and are not (respectively) in blobfs. It
-// uses Filesystem.ValidateStaticIndex() to determine which blobs are present.
-type validationDir struct {
-	unsupportedDirectory
-	fs *Filesystem
-}
-
-func (valDir *validationDir) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	name = clean(name)
-	if name == "" {
-		return nil, valDir, nil, nil
-	}
-
-	// we only serve two files
-	if name != "present" && name != "missing" {
-		return nil, nil, nil, fs.ErrNotFound
-	}
-
-	// we're read only
-	if flags.Write() || flags.Truncate() || flags.Directory() || flags.Append() {
-		return nil, nil, nil, fs.ErrNotSupported
-	}
-
-	present, missing, err := valDir.fs.ValidateStaticIndex()
-	if err != nil {
-		return nil, nil, nil, fs.ErrFailedPrecondition
-	}
-
-	pEnts := make([]string, 0, len(present))
-	for entry := range present {
-		pEnts = append(pEnts, entry)
-	}
-
-	mEnts := make([]string, 0, len(missing))
-	for entry := range missing {
-		mEnts = append(mEnts, entry)
-	}
-
-	sort.Strings(pEnts)
-	sort.Strings(mEnts)
-
-	t := time.Now()
-	switch name {
-	case "present":
-		return &validationFile{unsupportedFile: unsupportedFile("present"), entries: pEnts, statTime: t}, nil, nil, nil
-	case "missing":
-		return &validationFile{unsupportedFile: unsupportedFile("missing"), entries: mEnts, statTime: t}, nil, nil, nil
-	default:
-		// should actually be impossible to get here given the check above
-		return nil, nil, nil, fs.ErrNotFound
-	}
-}
-
-func (valDir *validationDir) Read() ([]fs.Dirent, error) {
-	return []fs.Dirent{
-		fileDirEnt("missing"),
-		fileDirEnt("present"),
-	}, nil
-}
-
-func (valDir *validationDir) Stat() (int64, time.Time, time.Time, error) {
-	t := time.Now()
-	return 2, t, t, nil
-}
-
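-// Each entry is rendered as a 64-character merkle root followed by a newline.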
-const lineSize = int64(64 + 1)
-
-// validationFile represents the present or missing blobs in the static index.
-// The size reported by Stat() is the number of entries multiplied by the
-// fixed line size.
-type validationFile struct {
-	unsupportedFile
-	entries  []string
-	statTime time.Time
-	seek     int64
-}
-
-func (valFile *validationFile) Stat() (int64, time.Time, time.Time, error) {
-	return int64(len(valFile.entries)) * lineSize, valFile.statTime, valFile.statTime, nil
-}
-
-// Read reads at most len(p) bytes into p from the file at a location determined
-// by off and whence. Only fs.WhenceFromCurrent is supported; any other whence
-// value returns fs.ErrInvalidArgs.
-func (valFile *validationFile) Read(p []byte, off int64, whence int) (int, error) {
-	if whence == fs.WhenceFromCurrent {
-		valFile.seek += off
-	} else {
-		return 0, fs.ErrInvalidArgs
-	}
-
-	if valFile.seek < 0 {
-		valFile.seek = 0
-	}
-
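-	// Translate the absolute seek position into an entry index and a byte
-	// offset within that entry's line.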
-	listOffset := valFile.seek / lineSize
-	entPos := valFile.seek % lineSize
-
-	copied := 0
-	for ; copied < len(p); listOffset++ {
-		if listOffset >= int64(len(valFile.entries)) {
-			if copied < len(p) {
-				return copied, fs.ErrEOF
-			}
-			break
-		}
-
-		src := []byte(valFile.entries[listOffset][entPos:])
-		entPos = 0
-		moved := copy(p[copied:], src)
-		copied += moved
-		valFile.seek += int64(moved)
-
-		if len(p) > copied {
-			p[copied] = '\n'
-			copied++
-			valFile.seek++
-		}
-
-		if copied == len(p) {
-			return len(p), nil
-		}
-	}
-
-	return copied, nil
-}
-
-func (valFile *validationFile) Close() error {
-	valFile.seek = 0
-	return nil
-}
diff --git a/src/sys/pkg/bin/pkgfs/pkgfs/validation_directory_test.go b/src/sys/pkg/bin/pkgfs/pkgfs/validation_directory_test.go
deleted file mode 100644
index f776b06..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgfs/validation_directory_test.go
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2019 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !build_with_native_toolchain
-
-package pkgfs
-
-import (
-	"fmt"
-	"testing"
-	"time"
-
-	"go.fuchsia.dev/fuchsia/src/lib/thinfs/fs"
-)
-
-var (
-	entries = []string{
-		"1111111111111111111111111111111111111111111111111111111111111111",
-		"2222222222222222222222222222222222222222222222222222222222222222",
-		"3333333333333333333333333333333333333333333333333333333333333333",
-		"4444444444444444444444444444444444444444444444444444444444444444",
-		"5555555555555555555555555555555555555555555555555555555555555555",
-		"6666666666666666666666666666666666666666666666666666666666666666",
-	}
-
-	file = []byte(
-		`1111111111111111111111111111111111111111111111111111111111111111
-2222222222222222222222222222222222222222222222222222222222222222
-3333333333333333333333333333333333333333333333333333333333333333
-4444444444444444444444444444444444444444444444444444444444444444
-5555555555555555555555555555555555555555555555555555555555555555
-6666666666666666666666666666666666666666666666666666666666666666
-`)
-)
-
-// TestWholeFileRead creates a buffer that is the size of the expected file
-// output and tries to read the whole file in one call.
-func TestWholeFileRead(t *testing.T) {
-	// read the whole file
-	if lineSize != int64(len(entries[0])+1) {
-		t.Fatal("test setup error: entries length doesn't match expected line size")
-	}
-
-	bufSize := len(file)
-	buf := make([]byte, bufSize)
-
-	vf := newTestValidationFile(entries)
-	r, err := vf.Read(buf, 0, fs.WhenceFromCurrent)
-	if err != nil {
-		t.Fatalf("read failed: %s", err)
-	}
-	if r != len(file) {
-		t.Fatalf("unexpected read length, expected %d, got %d", bufSize, r)
-	}
-
-	if err = compareBuffers(file, buf, 0, t); err != nil {
-		t.Fatalf("%s", err)
-	}
-
-	r, err = vf.Read(buf[:1], 0, fs.WhenceFromCurrent)
-	if err != fs.ErrEOF {
-		t.Fatalf("EOF expected, instead found: %s", err)
-	}
-	if r != 0 {
-		t.Fatalf("file read beyond end of file reported length %d", r)
-	}
-}
-
-// TestOverRead creates a buffer larger than the whole file and passes that
-// buffer into the read call.
-func TestOverRead(t *testing.T) {
-	// try to read more than the whole file
-	if lineSize != int64(len(entries[0])+1) {
-		t.Fatal("test setup error: entries length doesn't match expected line size")
-	}
-
-	bufSize := len(file) * 2
-	buf := make([]byte, bufSize)
-
-	vf := newTestValidationFile(entries)
-	r, err := vf.Read(buf, 0, fs.WhenceFromCurrent)
-	if err != fs.ErrEOF {
-		t.Fatalf("EOF expected, instead found: %s", err)
-	}
-	if r != len(file) {
-		t.Fatalf("unexpected read length, expected %d, got %d", len(file), r)
-	}
-
-	if err = compareBuffers(file, buf[:len(file)], 0, t); err != nil {
-		t.Fatalf("%s", err)
-	}
-}
-
-// TestMultipleRead tries to read the first half of the file in one call and
-// the remainder in the second call.
-func TestMultipleRead(t *testing.T) {
-	if lineSize != int64(len(entries[0])+1) {
-		t.Fatal("test setup error: entries length doesn't match expected line size")
-	}
-
-	// read half the file and then the next half
-	bufSize := len(file)
-	buf := make([]byte, bufSize)
-
-	vf := newTestValidationFile(entries)
-	firstHalf := len(buf) / 2
-	r, err := vf.Read(buf[0:firstHalf], 0, fs.WhenceFromCurrent)
-	if r != firstHalf {
-		t.Fatalf("unexpected read length reading first half of file, expected %d, got %d", firstHalf, r)
-	}
-
-	if err = compareBuffers(file[:firstHalf], buf[:firstHalf], 0, t); err != nil {
-		t.Fatalf("%s", err)
-	}
-
-	remaining := len(buf) - firstHalf
-	r, err = vf.Read(buf[firstHalf:], 0, fs.WhenceFromCurrent)
-	if r != remaining {
-		t.Fatalf("unexpected read length reading second half of file, expected %d, got %d", remaining, r)
-	}
-
-	if err = compareBuffers(file[firstHalf:], buf[firstHalf:], firstHalf, t); err != nil {
-		t.Fatalf("%s", err)
-	}
-}
-
-// TestSeekToLineBoundary tests what happens if we first seek to the end of an
-// entry, before the newline, and then read a line's worth of bytes.
-func TestSeekToLineBoundary(t *testing.T) {
-	buf := make([]byte, lineSize)
-	seekOffset := lineSize - 1
-
-	vf := newTestValidationFile(entries)
-	r, err := vf.Read(buf, seekOffset, fs.WhenceFromCurrent)
-	if r != len(buf) {
-		t.Fatalf("unexpected read length, expected %d got %d", len(buf), r)
-	}
-
-	if err = compareBuffers(file[seekOffset:], buf, int(seekOffset), t); err != nil {
-		t.Fatalf("%s", err)
-	}
-}
-
-// TestNewLineAdded tests what happens if we read a whole entry without the
-// newline and then read one more byte, which should be the newline.
-func TestNewLineAdded(t *testing.T) {
-	// read 64 bytes, then read one more
-	buf := make([]byte, lineSize)
-	vf := newTestValidationFile(entries)
-	firstRead := len(buf) - 1
-	r, err := vf.Read(buf[0:firstRead], 0, fs.WhenceFromCurrent)
-
-	if r != firstRead {
-		t.Fatalf("unexpected read length reading first part of entry, expected %d, got %d",
-			firstRead, r)
-	}
-
-	if err = compareBuffers(file[:firstRead], buf[:firstRead], 0, t); err != nil {
-		t.Fatalf("%s", err)
-	}
-
-	r, err = vf.Read(buf[firstRead:], 0, fs.WhenceFromCurrent)
-	if r != len(buf)-firstRead {
-		t.Fatalf("unexpected read length reading second part of entry, expecting %d, got %d",
-			len(buf)-firstRead, r)
-	}
-
-	if err = compareBuffers(file[firstRead:], buf[firstRead:], firstRead, t); err != nil {
-		t.Fatalf("%s", err)
-	}
-}
-
-// TestReadsAcrossEntries reads half an entry and then a full entry's worth of
-// data.
-func TestReadsAcrossEntries(t *testing.T) {
-	// read half of an entry, read a full entry's worth
-
-	if lineSize != int64(len(entries[0])+1) {
-		t.Fatal("test setup error: entries length doesn't match expected line size")
-	}
-
-	buf := make([]byte, lineSize)
-	vf := newTestValidationFile(entries)
-	firstRead := len(buf) / 2
-	r, err := vf.Read(buf[0:firstRead], 0, fs.WhenceFromCurrent)
-
-	if r != firstRead {
-		t.Fatalf("unexpected read length reading first half of entry, expected %d, got %d",
-			firstRead, r)
-	}
-
-	if err = compareBuffers(file[:firstRead], buf[:firstRead], 0, t); err != nil {
-		t.Fatalf("%s", err)
-	}
-
-	r, err = vf.Read(buf[firstRead:], 0, fs.WhenceFromCurrent)
-	if r != len(buf)-firstRead {
-		t.Fatalf("unexpected read length reading second half of entry, expecting %d, got %d",
-			len(buf)-firstRead, r)
-	}
-
-	if err = compareBuffers(file[firstRead:], buf[firstRead:], firstRead, t); err != nil {
-		t.Fatalf("%s", err)
-	}
-}
-
-// TestBackwardSeek checks what happens if we read the whole file and then seek
-// backward an entry and a half.
-func TestBackwardSeek(t *testing.T) {
-	if lineSize != int64(len(entries[0])+1) {
-		t.Fatal("test setup error: entries length doesn't match expected line size")
-	}
-	bufSize := len(file)
-	buf := make([]byte, bufSize)
-
-	vf := newTestValidationFile(entries)
-	r, err := vf.Read(buf, 0, fs.WhenceFromCurrent)
-	if err != nil {
-		t.Fatalf("read failed: %s", err)
-	}
-	if r != len(file) {
-		t.Fatalf("unexpected read length, expected %d, got %d", bufSize, r)
-	}
-
-	if err = compareBuffers(file, buf[:len(file)], 0, t); err != nil {
-		t.Fatalf("%s", err)
-	}
-
-	secondReadSz := lineSize * 3 / 2
-	r, err = vf.Read(buf, 0-secondReadSz, fs.WhenceFromCurrent)
-	if err != fs.ErrEOF {
-		t.Fatalf("read expected EOF, but got: %s", err)
-	}
-	if int64(r) != secondReadSz {
-		t.Fatalf("unexpected read length, expected %d, got %d", secondReadSz, r)
-	}
-	fileOffset := len(file) - int(secondReadSz)
-	if err = compareBuffers(file[fileOffset:], buf[:secondReadSz], fileOffset, t); err != nil {
-		t.Fatalf("%s", err)
-	}
-}
-
-func newTestValidationFile(entries []string) *validationFile {
-	return &validationFile{
-		unsupportedFile: unsupportedFile("testing"),
-		entries:         entries,
-		statTime:        time.Now(),
-	}
-}
-
-func compareBuffers(expect, actual []byte, off int, t *testing.T) error {
-	var err error
-	for i, b := range actual {
-		if expect[i] != b {
-			err = fmt.Errorf("buffers differ")
-			t.Errorf("read file differs at index %d, expected %q, got %q", off+i, expect[i], b)
-		}
-	}
-	return err
-}
diff --git a/src/sys/pkg/bin/pkgfs/pkgfs/versions_directory.go b/src/sys/pkg/bin/pkgfs/pkgfs/versions_directory.go
deleted file mode 100644
index c83b55a..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgfs/versions_directory.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2018 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// +build !build_with_native_toolchain
-
-package pkgfs
-
-import (
-	"path/filepath"
-	"strings"
-	"time"
-
-	"go.fuchsia.dev/fuchsia/src/lib/thinfs/fs"
-)
-
-// versionsDirectory lists packages by merkleroot and enables opening packages
-// by merkleroot.
-type versionsDirectory struct {
-	unsupportedDirectory
-
-	fs *Filesystem
-}
-
-func (d *versionsDirectory) Dup() (fs.Directory, error) {
-	return d, nil
-}
-
-func (d *versionsDirectory) Close() error { return nil }
-
-func (d *versionsDirectory) Open(name string, flags fs.OpenFlags) (fs.File, fs.Directory, *fs.Remote, error) {
-	name = clean(name)
-
-	if name == "" {
-		return nil, d, nil, nil
-	}
-
-	parts := strings.Split(name, "/")
-
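-	// parts[0] must be the merkle root of a package known to the index; any
-	// remaining path segments are resolved inside that package.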
-	if !merklePat.MatchString(parts[0]) {
-		return nil, nil, nil, fs.ErrNotFound
-	}
-
-	pkg, found := d.fs.index.GetRoot(parts[0])
-	if !found {
-		return nil, nil, nil, fs.ErrNotFound
-	}
-
-	pd, err := newPackageDirFromBlob(parts[0], d.fs, d.fs.shouldAllowExecutableOpenByPackageRoot(parts[0], pkg.Name))
-	if err != nil {
-		return nil, nil, nil, err
-	}
-
-	if len(parts) > 1 {
-		return pd.Open(filepath.Join(parts[1:]...), flags)
-	}
-
-	if flags.Create() || flags.Truncate() || flags.Write() || flags.Append() || flags.File() {
-		return nil, nil, nil, fs.ErrNotSupported
-	}
-
-	return nil, pd, nil, nil
-}
-
-func (d *versionsDirectory) Read() ([]fs.Dirent, error) {
-	roots := d.fs.index.PackageBlobs()
-
-	dents := make([]fs.Dirent, 0, len(roots))
-	for _, m := range roots {
-		dents = append(dents, fileDirEnt(m))
-	}
-	return dents, nil
-}
-
-func (d *versionsDirectory) Stat() (int64, time.Time, time.Time, error) {
-	return 0, d.fs.mountTime, d.fs.mountTime, nil
-}
diff --git a/src/sys/pkg/bin/pkgfs/pkgsvr/pkgsvr.go b/src/sys/pkg/bin/pkgfs/pkgsvr/pkgsvr.go
deleted file mode 100644
index 4ad7613..0000000
--- a/src/sys/pkg/bin/pkgfs/pkgsvr/pkgsvr.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2019 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//go:build !build_with_native_toolchain
-// +build !build_with_native_toolchain
-
-package pkgsvr
-
-import (
-	"context"
-	"flag"
-	"log"
-	"syscall"
-	"syscall/zx"
-	"syscall/zx/fdio"
-
-	"go.fuchsia.dev/fuchsia/src/lib/component"
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pkgfs/pkgfs"
-)
-
-// Main starts a package server program
-func Main() {
-	var (
-		blob                                   = flag.String("blob", "/blob", "Path at which to store blobs")
-		enforcePkgfsPackagesNonStaticAllowlist = flag.Bool("enforcePkgfsPackagesNonStaticAllowlist",
-			true,
-			"Whether to enforce the allowlist of non-static packages allowed to appear in /pkgfs/packages")
-		enforceNonBaseExecutabilityRestrictions = flag.Bool("enforceNonBaseExecutabilityRestrictions", true,
-			"Whether to enforce the restrictions to executability of files in packages to just packages in base or the allowlist")
-	)
-
-	log.SetPrefix("pkgsvr: ")
-	log.SetFlags(0) // no time required
-	flag.Parse()
-
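-	// The optional positional argument is the merkle root of the blob from
-	// which to serve the system package.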
-	sysPkg := flag.Arg(0)
-
-	blobDir, err := syscall.OpenPath(*blob, syscall.O_RDWR|syscall.O_DIRECTORY, 0777)
-	if err != nil {
-		log.Fatalf("pkgfs: failed to open %q: %s", *blob, err)
-	}
-
-	log.Printf("pkgfs: enforce pkgfs/packages non-static allowlist: %v", *enforcePkgfsPackagesNonStaticAllowlist)
-	log.Printf("pkgfs: enforce executability restrictions: %v", *enforceNonBaseExecutabilityRestrictions)
-	fs, err := pkgfs.New(blobDir.(*fdio.Directory), *enforcePkgfsPackagesNonStaticAllowlist, *enforceNonBaseExecutabilityRestrictions)
-	if err != nil {
-		log.Fatalf("pkgfs: initialization failed: %s", err)
-	}
-
-	h := component.GetStartupHandle(component.HandleInfo{Type: component.HandleUser0, Arg: 0})
-	if h == zx.HandleInvalid {
-		log.Fatalf("pkgfs: mount failed, no serving handle supplied in startup arguments")
-	}
-
-	if sysPkg != "" {
-		if err := fs.SetSystemRoot(sysPkg); err != nil {
-			log.Printf("system: failed to set system root from blob %q: %s", sysPkg, err)
-		}
-		log.Printf("system: will be served from %s", sysPkg)
-	} else {
-		log.Printf("system: no system package blob supplied")
-	}
-
-	log.Printf("pkgfs serving blobfs %s", *blob)
-	if err := fs.Serve(zx.Channel(h)); err != nil {
-		log.Fatalf("pkgfs: serve failed on startup handle: %s", err)
-	}
-	component.NewContextFromStartupInfo().BindStartupHandle(context.Background())
-}
diff --git a/src/sys/pkg/bin/pkgfs/ramdisk/ramdisk.go b/src/sys/pkg/bin/pkgfs/ramdisk/ramdisk.go
deleted file mode 100644
index 140d0f5..0000000
--- a/src/sys/pkg/bin/pkgfs/ramdisk/ramdisk.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-//go:build !build_with_native_toolchain
-// +build !build_with_native_toolchain
-
-package ramdisk
-
-// #cgo LDFLAGS: -lramdevice-client -lfdio -lzircon
-// #include <ramdevice-client/ramdisk.h>
-// #include <lib/fdio/directory.h>
-// #include <lib/fdio/fd.h>
-// #include <lib/fdio/fdio.h>
-// #include <lib/fdio/spawn.h>
-// #include <lib/fdio/vfs.h>
-// #include <string.h>
-// #include <zircon/syscalls.h>
-//
-// zx_status_t ramdisk_blobfs_mkfs(const ramdisk_client_t* client, zx_handle_t* process_out) {
-// 	zx_status_t status = ZX_OK;
-// 	fdio_spawn_action_t actions[1] = {
-// 		{
-// 			.action = FDIO_SPAWN_ACTION_ADD_HANDLE,
-// 			.h = {
-// 				.id = FS_HANDLE_BLOCK_DEVICE_ID,
-// 				.handle = ZX_HANDLE_INVALID,
-// 			}
-// 		},
-// 	};
-//
-// 	status = fdio_fd_clone(ramdisk_get_block_fd(client), &actions[0].h.handle);
-// 	if (status != ZX_OK) {
-// 		fprintf(stderr, "failed to get service handle! %d\n", status);
-// 		return status;
-// 	}
-// 	if (actions[0].h.handle == ZX_HANDLE_INVALID) {
-// 		fprintf(stderr, "handle invalid after clone\n");
-// 		return ZX_ERR_INTERNAL;
-// 	}
-// 	char* argv[4] = {"/pkg/bin/blobfs", "mkfs", 0};
-// 	return fdio_spawn_etc(ZX_HANDLE_INVALID, FDIO_SPAWN_CLONE_ALL, argv[0], (const char* const *)argv, NULL, 1, actions, process_out, NULL);
-// }
-//
-// zx_status_t ramdisk_blobfs_mount(const ramdisk_client_t* client, zx_handle_t dir_request, uint32_t flags, zx_handle_t* process_out) {
-// 	zx_status_t status = ZX_OK;
-// 	zx_handle_t export_root_client, export_root_server;
-// 	status = zx_channel_create(0, &export_root_client, &export_root_server);
-// 	if (status != ZX_OK) {
-// 		fprintf(stderr, "unable to create channel\n");
-// 		return status;
-// 	}
-// 	fdio_spawn_action_t actions[2] = {
-// 		{
-// 			.action = FDIO_SPAWN_ACTION_ADD_HANDLE,
-// 			.h = {
-// 				.id = FS_HANDLE_BLOCK_DEVICE_ID,
-// 				.handle = ZX_HANDLE_INVALID,
-// 			}
-// 		},
-// 		{
-// 			.action = FDIO_SPAWN_ACTION_ADD_HANDLE,
-// 			.h = {
-// 				.id = PA_DIRECTORY_REQUEST,
-// 				.handle = export_root_server,
-// 			}
-// 		}
-// 	};
-//
-// 	status = fdio_fd_clone(ramdisk_get_block_fd(client), &actions[0].h.handle);
-// 	if (status != ZX_OK) {
-// 		fprintf(stderr, "failed to get service handle! %d\n", status);
-// 		zx_handle_close(export_root_client);
-// 		zx_handle_close(export_root_server);
-// 		return status;
-// 	}
-// 	if (actions[0].h.handle == ZX_HANDLE_INVALID) {
-// 		fprintf(stderr, "handle invalid after clone\n");
-// 		zx_handle_close(export_root_client);
-// 		zx_handle_close(export_root_server);
-// 		return ZX_ERR_INTERNAL;
-// 	}
-// 	char* argv[4] = {"/pkg/bin/blobfs", "mount", 0};
-// 	status = fdio_spawn_etc(ZX_HANDLE_INVALID, FDIO_SPAWN_CLONE_ALL, argv[0], (const char* const *)argv, NULL, 2, actions, process_out, NULL);
-// 	if (status != ZX_OK) {
-// 		zx_handle_close(export_root_client);
-// 		return status;
-// 	}
-// 	status = fdio_open_at(export_root_client, "root", flags, dir_request);
-// 	zx_handle_close(export_root_client);
-// 	return status;
-// }
-import "C"
-
-import (
-	"context"
-	"os"
-	"runtime"
-	"syscall/zx"
-	"syscall/zx/fdio"
-	zxio "syscall/zx/io"
-	"syscall/zx/zxwait"
-
-	"fidl/fuchsia/io"
-
-	"go.fuchsia.dev/fuchsia/src/sys/pkg/bin/pkgfs/iou"
-)
-
-type Ramdisk struct {
-	ramdisk_client *C.struct_ramdisk_client
-	proc           zx.Handle
-	dir            *fdio.Directory
-}
-
-// New creates a ramdisk of size bytes, rounded down to whole 512-byte blocks.
-func New(size int) (*Ramdisk, error) {
-	r := &Ramdisk{}
-	return r, r.create(512, uint64(size)/512)
-}
-
-func (r *Ramdisk) create(blkSz uint64, blkCnt uint64) error {
-	n := C.ramdisk_create(C.uint64_t(blkSz), C.uint64_t(blkCnt), &r.ramdisk_client)
-	if n == 0 {
-		runtime.SetFinalizer(r, finalizeRamdisk)
-		return nil
-	}
-	return &zx.Error{Status: zx.Status(n), Text: "ramdisk_create"}
-}
-
-func (r *Ramdisk) Destroy() error {
-	if r.proc != zx.HandleInvalid {
-		zx.Sys_task_kill(r.proc)
-		r.dir.Close()
-	}
-	n := C.ramdisk_destroy(r.ramdisk_client)
-	if n == 0 {
-		return nil
-	}
-	return &zx.Error{Status: zx.Status(n), Text: "ramdisk_destroy"}
-}
-
-func (r *Ramdisk) StartBlobfs() error {
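-	// Format the ramdisk as blobfs, wait for mkfs to exit, then mount it and
-	// retain the mounted root directory channel in r.dir.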
-	status := C.ramdisk_blobfs_mkfs(r.ramdisk_client, (*C.uint)(&r.proc))
-	if zx.Status(status) != zx.ErrOk {
-		return &zx.Error{Status: zx.Status(status), Text: "ramdisk_blobfs_mkfs"}
-	}
-	if _, err := zxwait.WaitContext(context.Background(), r.proc, zx.SignalTaskTerminated); err != nil {
-		return err
-	}
-
-	pxy, req, err := zx.NewChannel(0)
-	if err != nil {
-		return err
-	}
-	status = C.ramdisk_blobfs_mount(
-		r.ramdisk_client,
-		C.uint(uint32(zx.Handle(req))),
-		C.uint(io.OpenFlagsRightReadable|io.OpenFlagsRightWritable|io.OpenFlagsRightExecutable),
-		(*C.uint)(&r.proc),
-	)
-	if zx.Status(status) != zx.ErrOk {
-		pxy.Close()
-		return &zx.Error{Status: zx.Status(status), Text: "ramdisk_blobfs_mount"}
-	}
-
-	r.dir = fdio.NewDirectoryWithCtx(&zxio.DirectoryWithCtxInterface{Channel: pxy})
-	return nil
-}
-
-func (r *Ramdisk) Open(path string, flags int, mode uint32) (*os.File, error) {
-	return iou.OpenFrom(r.dir, path, flags, mode)
-}
-
-func finalizeRamdisk(r *Ramdisk) {
-	r.Destroy()
-}