[blobfs] Clean up paged BlobLoader path.

Separates out the result types for loading a paged vs. an unpaged blob. The
unpaged path is unchanged.

The paged path is changed to return the PagerInfo and BlobLayout so the
caller (Blob in this case) can subsequently create the vmo and mapping.
This eliminates the need for the vmo creation callback, which
significantly simplifies the structure of this code.

It also eliminates the vmo handle duplication in the new (currently unused)
pager code path, which has been a source of bugs.
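
For reference, the new-pager caller side now looks roughly like this (a
simplified sketch of the blob.cc change below, with logging and cache-policy
handling trimmed):

    zx::status<BlobLoader::PagedLoadResult> load_result =
        blobfs_->loader().LoadBlobPaged(map_index_, &blobfs_->blob_corruption_notifier());
    if (load_result.is_error())
      return load_result.error_value();

    // The caller now creates and maps the vmo itself instead of passing a callback.
    if (auto status = EnsureCreateVmo(load_result->layout->FileBlockAlignedSize()); status.is_error())
      return status.take_error();
    if (zx_status_t status = data_mapping_.Map(vmo()); status != ZX_OK) {
      FreeVmo();  // Roll back the allocated vmo on failure.
      return status;
    }

    pager_info_ = std::move(load_result->pager_info);
    merkle_mapping_ = std::move(load_result->merkle);
    return ZX_OK;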

Moves handling of null blobs out of the BlobLoader and into its caller
(the Blob).
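
Concretely, the size check now happens in the Blob's load path before the
loader is invoked (a sketch taken from the blob.cc hunk below):

    if (inode_.blob_size == 0) {
      // Null blobs don't need any loading, just verification that they're correct.
      return VerifyNullBlob();
    }

and BlobLoader::LoadBlob()/LoadBlobPaged() now ZX_ASSERT that they are only
called for nonempty inodes.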

There should be no observable behavior change.

Change-Id: I8d7fb4847307fec9ea79862bb160f020554c5953
Reviewed-on: https://fuchsia-review.googlesource.com/c/fuchsia/+/512862
Commit-Queue: Brett Wilson <brettw@google.com>
Reviewed-by: Martin Lindsay <mlindsay@google.com>
diff --git a/src/storage/blobfs/blob.cc b/src/storage/blobfs/blob.cc
index bfb1248..d28b0c4 100644
--- a/src/storage/blobfs/blob.cc
+++ b/src/storage/blobfs/blob.cc
@@ -159,8 +159,7 @@
   ZX_DEBUG_ASSERT(inode_.blob_size == 0);
   ZX_DEBUG_ASSERT(inode_.block_count == 0);
 
-  zx_status_t status = VerifyNullBlob();
-  if (status != ZX_OK) {
+  if (zx_status_t status = VerifyNullBlob(); status != ZX_OK) {
     return status;
   }
 
@@ -823,34 +822,28 @@
     set_overridden_cache_policy(*cache_policy);
   }
 
-  // Create the callback supplied to the loader to create the appropriate VMO. In the new pager, the
-  // VMO is created by the PagedVmo.
-  auto create_vmo = [this](BlobLayout::ByteCountType aligned_size,
-                           pager::UserPagerInfo info) -> zx::status<zx::vmo> {
-    if (auto status = EnsureCreateVmo(aligned_size); status.is_error())
-      return status.take_error();
+  zx::status<BlobLoader::PagedLoadResult> load_result =
+      blobfs_->loader().LoadBlobPaged(map_index_, &blobfs_->blob_corruption_notifier());
+  if (load_result.is_error())
+    return load_result.error_value();
 
-    // TODO(fxbug.dev/51111) this creates a duplicate handle to the data VMO for the loader to use
-    // since it expects to take ownership of the object. This is incorrect. Duplicating a vmo
-    // registered with the paging system causes unusual problems.
-    zx::vmo result;
-    if (zx_status_t status = vmo().duplicate(ZX_RIGHT_SAME_RIGHTS, &result); status != ZX_OK)
-      return zx::error(status);
+  // Make the vmo.
+  if (auto status = EnsureCreateVmo(load_result->layout->FileBlockAlignedSize()); status.is_error())
+    return status.take_error();
 
-    pager_info_ = std::move(info);
-    return zx::ok(std::move(result));
-  };
-  zx::status<BlobLoader::LoadResult> load_result = blobfs_->loader().LoadBlobPaged(
-      map_index_, std::move(create_vmo), &blobfs_->blob_corruption_notifier());
-  if (load_result.is_ok()) {
-    data_mapping_ = std::move(load_result->data_mapper);
-    merkle_mapping_ = std::move(load_result->merkle);
-  } else {
-    // On failure, we need to free the vmo which was created on this object for the loader to use.
-    FreeVmo();
+  // Map the result.
+  // TODO(fxbug.dev/74061): See if we can avoid doing this mapping.
+  if (zx_status_t status = data_mapping_.Map(vmo()); status != ZX_OK) {
+    FX_LOGS(ERROR) << "Failed to create mapping for data vmo: " << zx_status_get_string(status);
+    FreeVmo();  // Rollback the allocated vmo.
+    return status;
   }
 
-  return load_result.status_value();
+  // Commit the other load information.
+  pager_info_ = std::move(load_result->pager_info);
+  merkle_mapping_ = std::move(load_result->merkle);
+
+  return ZX_OK;
 }
 
 #else
@@ -866,32 +859,35 @@
     set_overridden_cache_policy(*cache_policy);
   }
 
-  // Create the callback supplied to the loader to create the appropriate VMO. In the old pager, a
-  // PageWatcher needs to be created and that object creates the VMO.
-  std::unique_ptr<pager::PageWatcher> created_page_watcher;
-  auto create_vmo = [this, &created_page_watcher](
-                        BlobLayout::ByteCountType aligned_size,
-                        pager::UserPagerInfo info) -> zx::status<zx::vmo> {
-    created_page_watcher = std::make_unique<pager::PageWatcher>(blobfs_->pager(), std::move(info));
+  zx::status<BlobLoader::PagedLoadResult> load_result =
+      blobfs_->loader().LoadBlobPaged(map_index_, &blobfs_->blob_corruption_notifier());
+  if (load_result.is_error())
+    return load_result.error_value();
 
-    zx::vmo data_vmo;
-    if (zx_status_t status = created_page_watcher->CreatePagedVmo(aligned_size, &data_vmo);
-        status != ZX_OK) {
-      return zx::error(status);
-    }
-    return zx::ok(std::move(data_vmo));
-  };
+  auto created_page_watcher =
+      std::make_unique<pager::PageWatcher>(blobfs_->pager(), std::move(load_result->pager_info));
 
-  zx::status<BlobLoader::LoadResult> load_result = blobfs_->loader().LoadBlobPaged(
-      map_index_, std::move(create_vmo), &blobfs_->blob_corruption_notifier());
-  if (load_result.is_ok()) {
-    data_mapping_ = std::move(load_result->data_mapper);
-    merkle_mapping_ = std::move(load_result->merkle);
-    vmo_ = std::move(load_result->data_vmo);
-    page_watcher_ = std::move(created_page_watcher);
+  // Make the vmo.
+  zx::vmo data_vmo;
+  if (zx_status_t status = created_page_watcher->CreatePagedVmo(
+          load_result->layout->FileBlockAlignedSize(), &data_vmo);
+      status != ZX_OK) {
+    return status;
   }
 
-  return load_result.status_value();
+  // Map the result.
+  // TODO(fxbug.dev/74061): See if we can avoid doing this mapping.
+  if (zx_status_t status = data_mapping_.Map(data_vmo); status != ZX_OK) {
+    FX_LOGS(ERROR) << "Failed to create mapping for data vmo: " << zx_status_get_string(status);
+    return status;
+  }
+
+  // Commit the load artifacts now that all setup has succeeded.
+  merkle_mapping_ = std::move(load_result->merkle);
+  vmo_ = std::move(data_vmo);
+  page_watcher_ = std::move(created_page_watcher);
+
+  return ZX_OK;
 }
 
 #endif
@@ -899,7 +895,7 @@
 zx_status_t Blob::LoadUnpagedVmosFromDisk() {
   ZX_ASSERT(!IsDataLoaded());
 
-  zx::status<BlobLoader::LoadResult> load_result =
+  zx::status<BlobLoader::UnpagedLoadResult> load_result =
       blobfs_->loader().LoadBlob(map_index_, &blobfs_->blob_corruption_notifier());
   if (load_result.is_ok()) {
     data_mapping_ = std::move(load_result->data_mapper);
@@ -914,6 +910,11 @@
   if (IsDataLoaded())
     return ZX_OK;
 
+  if (inode_.blob_size == 0) {
+    // Null blobs don't need any loading, just verification that they're correct.
+    return VerifyNullBlob();
+  }
+
   zx_status_t status;
   if (IsPagerBacked()) {
     status = LoadPagedVmosFromDisk();
diff --git a/src/storage/blobfs/blob_loader.cc b/src/storage/blobfs/blob_loader.cc
index 178ee6f4..15fe3dd 100644
--- a/src/storage/blobfs/blob_loader.cc
+++ b/src/storage/blobfs/blob_loader.cc
@@ -84,9 +84,9 @@
                            std::move(decompressor_client)));
 }
 
-zx::status<BlobLoader::LoadResult> BlobLoader::LoadBlob(
+zx::status<BlobLoader::UnpagedLoadResult> BlobLoader::LoadBlob(
     uint32_t node_index, const BlobCorruptionNotifier* corruption_notifier) {
-  LoadResult result;
+  UnpagedLoadResult result;
 
   ZX_DEBUG_ASSERT(read_mapper_.vmo().is_valid());
   auto inode = node_finder_->GetNode(node_index);
@@ -94,13 +94,15 @@
     return inode.take_error();
   }
 
-  // LoadBlob should only be called for Inodes. If this doesn't hold, one of two things happened:
+  // LoadBlob should only be called for nonempty Inodes. If this doesn't hold, one of two things
+  // happened:
   //   - Programmer error
   //   - Corruption of a blob's Inode
   // In either case it is preferable to ASSERT than to return an error here, since the first case
   // should happen only during development and in the second case there may be more corruption and
   // we want to unmount the filesystem before any more damage is done.
   ZX_ASSERT(inode->header.IsInode() && inode->header.IsAllocated());
+  ZX_ASSERT(inode->blob_size > 0);
 
   TRACE_DURATION("blobfs", "BlobLoader::LoadBlob", "blob_size", inode->blob_size);
 
@@ -110,15 +112,6 @@
     FX_LOGS(ERROR) << "Failed to create blob layout: " << blob_layout.status_string();
     return blob_layout.take_error();
   }
-  if (inode->blob_size == 0) {
-    // No data to load for the null blob.
-    if (zx_status_t status =
-            VerifyNullBlob(digest::Digest(inode->merkle_root_hash), corruption_notifier);
-        status != ZX_OK) {
-      return zx::error(status);
-    }
-    return zx::ok(std::move(result));
-  }
 
   std::unique_ptr<BlobVerifier> verifier;
   if (zx_status_t status = InitMerkleVerifier(node_index, *inode.value(), *blob_layout.value(),
@@ -154,10 +147,9 @@
   return zx::ok(std::move(result));
 }
 
-zx::status<BlobLoader::LoadResult> BlobLoader::LoadBlobPaged(
-    uint32_t node_index, CreateDataVmoCallback create_data,
-    const BlobCorruptionNotifier* corruption_notifier) {
-  LoadResult result;
+zx::status<BlobLoader::PagedLoadResult> BlobLoader::LoadBlobPaged(
+    uint32_t node_index, const BlobCorruptionNotifier* corruption_notifier) {
+  PagedLoadResult result;
 
   ZX_DEBUG_ASSERT(read_mapper_.vmo().is_valid());
   auto inode = node_finder_->GetNode(node_index);
@@ -165,65 +157,46 @@
     return inode.take_error();
   }
 
-  // LoadBlobPaged should only be called for Inodes. If this doesn't hold, one of two things
-  // happened:
+  // LoadBlobPaged should only be called for nonempty Inodes. If this doesn't hold, one of two
+  // things happened:
   //   - Programmer error
   //   - Corruption of a blob's Inode
   // In either case it is preferable to ASSERT than to return an error here, since the first case
   // should happen only during development and in the second case there may be more corruption and
   // we want to unmount the filesystem before any more damage is done.
   ZX_ASSERT(inode->header.IsInode() && inode->header.IsAllocated());
+  ZX_ASSERT(inode->blob_size > 0);
 
   TRACE_DURATION("blobfs", "BlobLoader::LoadBlobPaged", "blob_size", inode->blob_size);
 
-  auto blob_layout = BlobLayout::CreateFromInode(GetBlobLayoutFormat(txn_manager_->Info()),
-                                                 *inode.value(), GetBlockSize());
-  if (blob_layout.is_error()) {
+  // Create and save the layout.
+  auto blob_layout_or = BlobLayout::CreateFromInode(GetBlobLayoutFormat(txn_manager_->Info()),
+                                                    *inode.value(), GetBlockSize());
+  if (blob_layout_or.is_error()) {
     FX_LOGS(ERROR) << "Failed to create blob layout: "
-                   << zx_status_get_string(blob_layout.error_value());
-    return blob_layout.take_error();
+                   << zx_status_get_string(blob_layout_or.error_value());
+    return blob_layout_or.take_error();
   }
-  if (inode->blob_size == 0) {
-    // No data to load for the null blob.
-    if (zx_status_t status =
-            VerifyNullBlob(digest::Digest(inode->merkle_root_hash), corruption_notifier);
-        status != ZX_OK) {
-      return zx::error(status);
-    }
-    return zx::ok(std::move(result));
-  }
+  result.layout = std::move(blob_layout_or.value());
+  result.pager_info.identifier = node_index;
+  result.pager_info.data_start_bytes =
+      static_cast<uint64_t>(result.layout->DataBlockOffset()) * GetBlockSize();
+  result.pager_info.data_length_bytes = inode->blob_size;
 
-  std::unique_ptr<BlobVerifier> verifier;
-  if (zx_status_t status = InitMerkleVerifier(node_index, *inode.value(), *blob_layout.value(),
-                                              corruption_notifier, &result.merkle, &verifier);
+  if (zx_status_t status =
+          InitMerkleVerifier(node_index, *inode.value(), *result.layout, corruption_notifier,
+                             &result.merkle, &result.pager_info.verifier);
       status != ZX_OK) {
     return zx::error(status);
   }
 
-  std::unique_ptr<SeekableDecompressor> decompressor;
-  if (zx_status_t status = InitForDecompression(node_index, *inode.value(), *blob_layout.value(),
-                                                *verifier, &decompressor);
+  if (zx_status_t status =
+          InitForDecompression(node_index, *inode.value(), *result.layout,
+                               *result.pager_info.verifier, &result.pager_info.decompressor);
       status != ZX_OK) {
     return zx::error(status);
   }
 
-  pager::UserPagerInfo userpager_info;
-  userpager_info.identifier = node_index;
-  userpager_info.data_start_bytes = uint64_t{blob_layout->DataBlockOffset()} * GetBlockSize();
-  userpager_info.data_length_bytes = inode->blob_size;
-  userpager_info.verifier = std::move(verifier);
-  userpager_info.decompressor = std::move(decompressor);
-
-  auto data_vmo_or = create_data(blob_layout->FileBlockAlignedSize(), std::move(userpager_info));
-  if (data_vmo_or.is_error())
-    return data_vmo_or.take_error();
-  result.data_vmo = std::move(data_vmo_or.value());
-
-  if (zx_status_t status = result.data_mapper.Map(std::move(result.data_vmo)); status != ZX_OK) {
-    FX_LOGS(ERROR) << "Failed to create mapping for data vmo: " << zx_status_get_string(status);
-    return zx::error(status);
-  }
-
   return zx::ok(std::move(result));
 }
 
@@ -509,12 +482,12 @@
 
 zx_status_t BlobLoader::VerifyNullBlob(Digest merkle_root, const BlobCorruptionNotifier* notifier) {
   std::unique_ptr<BlobVerifier> verifier;
-  zx_status_t status;
-  if ((status = BlobVerifier::CreateWithoutTree(std::move(merkle_root), metrics_,
-                                                /*data_size=*/0, notifier, &verifier)) != ZX_OK) {
+  if (zx_status_t status =
+          BlobVerifier::CreateWithoutTree(std::move(merkle_root), metrics_, 0, notifier, &verifier);
+      status != ZX_OK) {
     return status;
   }
-  return verifier->Verify(/*data=*/nullptr, /*data_size=*/0, /*buffer_size=*/0);
+  return verifier->Verify(nullptr, 0, 0);
 }
 
 }  // namespace blobfs
diff --git a/src/storage/blobfs/blob_loader.h b/src/storage/blobfs/blob_loader.h
index 97d5b27..25883e0 100644
--- a/src/storage/blobfs/blob_loader.h
+++ b/src/storage/blobfs/blob_loader.h
@@ -32,7 +32,12 @@
 // contents as needed.
 class BlobLoader {
  public:
-  struct LoadResult {
+  struct PagedLoadResult {
+    pager::UserPagerInfo pager_info;
+    std::unique_ptr<BlobLayout> layout;
+    fzl::OwnedVmoMapper merkle;
+  };
+  struct UnpagedLoadResult {
     zx::vmo data_vmo;
     fzl::VmoMapper data_mapper;
 
@@ -62,27 +67,20 @@
   //  - The stored merkle tree is well-formed.
   //  - The blob's merkle root in |inode| matches the root of the merkle tree stored on-disk.
   //  - The blob's contents match the merkle tree.
-  zx::status<LoadResult> LoadBlob(uint32_t node_index,
-                                  const BlobCorruptionNotifier* corruption_notifier);
+  zx::status<UnpagedLoadResult> LoadBlob(uint32_t node_index,
+                                         const BlobCorruptionNotifier* corruption_notifier);
 
   // Loads the merkle tree for the blob referenced |inode|, and prepare a pager-backed VMO for
   // data.
   //
-  // |page_watcher_out| will be a PageWatcher that is backing |data_out|.
-  // |data_out| will be a pager-backed VMO with no resident pages, padded up to a block size.
-  // |merkle_out| will be a VMO containing the merkle tree of the blob. For small blobs, there
-  // may be no merkle tree, in which case no VMO is returned.
-  //
   // This method verifies the following correctness properties:
   //  - The stored merkle tree is well-formed.
   //  - The blob's merkle root in |inode| matches the root of the merkle tree stored on-disk.
   //
   // This method does *NOT* immediately verify the integrity of the blob's data, this will be
   // lazily verified by the pager as chunks of the blob are loaded.
-  using CreateDataVmoCallback = std::function<zx::status<zx::vmo>(
-      BlobLayout::ByteCountType aligned_size, pager::UserPagerInfo info)>;
-  zx::status<LoadResult> LoadBlobPaged(uint32_t node_index, CreateDataVmoCallback create_data,
-                                       const BlobCorruptionNotifier* corruption_notifier);
+  zx::status<PagedLoadResult> LoadBlobPaged(uint32_t node_index,
+                                            const BlobCorruptionNotifier* corruption_notifier);
 
  private:
   BlobLoader(TransactionManager* txn_manager, BlockIteratorProvider* block_iter_provider,
diff --git a/src/storage/blobfs/test/unit/blob_loader_test.cc b/src/storage/blobfs/test/unit/blob_loader_test.cc
index 205c888..d095bc1 100644
--- a/src/storage/blobfs/test/unit/blob_loader_test.cc
+++ b/src/storage/blobfs/test/unit/blob_loader_test.cc
@@ -91,6 +91,9 @@
   }
 
   // Remounts the filesystem, which ensures writes are flushed and caches are wiped.
+  //
+  // Any Blob references that the test holds will need to be released before this function is
+  // called or the BlobCache destructor will assert that there are live blobs.
   zx_status_t Remount() {
     auto block_device = Blobfs::Destroy(std::move(fs_));
     fs_.reset();
@@ -149,6 +152,25 @@
 
   uint32_t LookupInode(const BlobInfo& info) { return LookupBlob(info)->Ino(); }
 
+  zx_status_t LoadBlobData(Blob* blob, std::vector<uint8_t>* data) {
+    data->clear();
+
+    fs::VnodeAttributes attrs;
+    if (zx_status_t status = blob->GetAttributes(&attrs); status != ZX_OK)
+      return status;
+
+    // Always read, even for 0-length blobs, to make sure we test the read path in this case.
+    data->resize(attrs.content_size + 1);
+    size_t actual = 0xdedbeef;  // Make sure this gets written to.
+    if (zx_status_t status = blob->Read(&(*data)[0], data->size(), 0, &actual); status != ZX_OK)
+      return status;
+
+    EXPECT_EQ(attrs.content_size, actual);
+    data->resize(actual);
+
+    return ZX_OK;
+  }
+
   std::vector<uint8_t> LoadPagedBlobData(Blob* blob) {
     zx::vmo vmo;
     size_t size = 0;
@@ -215,30 +237,6 @@
 // support paging.
 using BlobLoaderPagedTest = BlobLoaderTest;
 
-// Implementation of the "create VMO callback" for the LoadBlobPaged call. Since our tests don't
-// actually need a real paged VMO, we can just create a regular one of the requested size.
-zx::status<zx::vmo> CreateDataVmo(BlobLayout::ByteCountType aligned_size, pager::UserPagerInfo) {
-  zx::vmo vmo;
-  if (zx_status_t status = zx::vmo::create(aligned_size, 0, &vmo); status != ZX_OK)
-    return zx::error(status);
-  return zx::ok(std::move(vmo));
-};
-
-TEST_P(BlobLoaderTest, NullBlob) {
-  size_t blob_len = 0;
-  std::unique_ptr<BlobInfo> info = AddBlob(blob_len);
-  ASSERT_EQ(Remount(), ZX_OK);
-
-  auto result = loader().LoadBlob(LookupInode(*info), nullptr);
-  ASSERT_TRUE(result.is_ok());
-
-  EXPECT_FALSE(result->data_vmo.is_valid());
-  EXPECT_EQ(result->data_mapper.size(), 0ul);
-
-  EXPECT_FALSE(result->merkle.vmo().is_valid());
-  EXPECT_EQ(result->merkle.size(), 0ul);
-}
-
 TEST_P(BlobLoaderTest, SmallBlob) {
   size_t blob_len = 1024;
   std::unique_ptr<BlobInfo> info = AddBlob(blob_len);
@@ -357,22 +355,23 @@
 }
 
 TEST_P(BlobLoaderTest, NullBlobWithCorruptedMerkleRootFailsToLoad) {
-  size_t blob_len = 0;
-  std::unique_ptr<BlobInfo> info = AddBlob(blob_len);
-  uint32_t inode_index = LookupInode(*info);
+  std::unique_ptr<BlobInfo> info = AddBlob(0);
 
-  // Verify the null blob can be read back.
-  auto result = loader().LoadBlob(inode_index, nullptr);
-  ASSERT_TRUE(result.is_ok());
+  // The added empty blob should be valid.
+  auto blob = LookupBlob(*info);
+  ASSERT_EQ(ZX_OK, blob->Verify());
+
+  std::vector<uint8_t> data;
+  ASSERT_EQ(ZX_OK, LoadBlobData(blob.get(), &data));
 
   uint8_t corrupt_merkle_root[digest::kSha256Length] = "-corrupt-null-blob-merkle-root-";
   {
     // Corrupt the null blob's merkle root.
     // |inode| holds a pointer into |fs_| and needs to be destroyed before remounting.
-    auto inode = fs_->GetNode(inode_index);
+    auto inode = fs_->GetNode(blob->Ino());
     memcpy(inode->merkle_root_hash, corrupt_merkle_root, sizeof(corrupt_merkle_root));
     BlobTransaction transaction;
-    uint64_t block = (inode_index * kBlobfsInodeSize) / kBlobfsBlockSize;
+    uint64_t block = (blob->Ino() * kBlobfsInodeSize) / kBlobfsBlockSize;
     transaction.AddOperation({.vmo = zx::unowned_vmo(fs_->GetAllocator()->GetNodeMapVmo().get()),
                               .op = {
                                   .type = storage::OperationType::kWrite,
@@ -384,18 +383,17 @@
   }
 
   // Remount the filesystem so the node cache will pickup the new name for the blob.
+  blob.reset();  // Required for Remount() to succeed.
   ASSERT_EQ(Remount(), ZX_OK);
 
   // Verify the empty blob can be found by the corrupt name.
   BlobInfo corrupt_info;
   Digest corrupt_digest(corrupt_merkle_root);
-  snprintf(corrupt_info.path, sizeof(info->path), "%s", corrupt_digest.ToString().c_str());
-  EXPECT_EQ(LookupInode(corrupt_info), inode_index);
+  strncpy(corrupt_info.path, corrupt_digest.ToString().c_str(), sizeof(info->path));
 
-  // Verify the null blob with a corrupted Merkle root fails to load.
-  auto failed_result = loader().LoadBlob(inode_index, nullptr);
-  ASSERT_TRUE(failed_result.is_error());
-  EXPECT_EQ(failed_result.error_value(), ZX_ERR_IO_DATA_INTEGRITY);
+  // Loading the data should report corruption.
+  auto corrupt_blob = LookupBlob(corrupt_info);
+  EXPECT_EQ(ZX_ERR_IO_DATA_INTEGRITY, LoadBlobData(corrupt_blob.get(), &data));
 }
 
 TEST_P(BlobLoaderTest, LoadBlobWithAnInvalidNodeIndexIsAnError) {
@@ -407,7 +405,7 @@
 
 TEST_P(BlobLoaderPagedTest, LoadBlobPagedWithAnInvalidNodeIndexIsAnError) {
   uint32_t invalid_node_index = kMaxNodeId - 1;
-  auto result = loader().LoadBlobPaged(invalid_node_index, &CreateDataVmo, nullptr);
+  auto result = loader().LoadBlobPaged(invalid_node_index, nullptr);
   ASSERT_TRUE(result.is_error());
   EXPECT_EQ(result.error_value(), ZX_ERR_INVALID_ARGS);
 }
@@ -424,7 +422,7 @@
   inode->header.next_node = invalid_node_index;
   inode->extent_count = 2;
 
-  auto result = loader().LoadBlobPaged(node_index, &CreateDataVmo, nullptr);
+  auto result = loader().LoadBlobPaged(node_index, nullptr);
   ASSERT_TRUE(result.is_error());
   EXPECT_EQ(result.error_value(), ZX_ERR_IO_DATA_INTEGRITY);
 }