[blobfs][rac] Use seek table-only API
This change uses a new (not upstreamed) API for the ZSTD Seekable seek
table to perform seek table lookups, and the standard ZSTD streaming API
for reading whole frames.
Change-Id: I026809acff79e0fb37e14dc4190a2034e118302f
diff --git a/zircon/system/ulib/blobfs/blobfs.cc b/zircon/system/ulib/blobfs/blobfs.cc
index 21e69c0..3900cd8 100644
--- a/zircon/system/ulib/blobfs/blobfs.cc
+++ b/zircon/system/ulib/blobfs/blobfs.cc
@@ -870,7 +870,7 @@
// Only supported paged compression format is ZSTD Seekable.
ZX_DEBUG_ASSERT(info && info->compression_algorithm == CompressionAlgorithm::ZSTD_SEEKABLE);
- // Assume |ExtendReadRange| already called on |offset| and |length|.
+ // Assume |ExtendReadRange()| already called on |offset| and |length|.
ZX_DEBUG_ASSERT(offset % kBlobfsBlockSize == 0);
ZX_DEBUG_ASSERT(length % kBlobfsBlockSize == 0 || offset + length == info->data_length_bytes);
@@ -886,14 +886,21 @@
return status;
}
- status = compressed_blobs_for_paging_->Read(
- info->identifier, static_cast<uint8_t*>(mapping.start()), offset, length);
+ uint64_t read_offset = offset;
+ uint64_t read_length = length;
+ status =
+ compressed_blobs_for_paging_->Read(info->identifier, static_cast<uint8_t*>(mapping.start()),
+ mapping.size(), &read_offset, &read_length);
if (status != ZX_OK) {
FS_TRACE_ERROR("blobfs: Failed to read from ZSTD Seekable archive to service page fault: %s\n",
zx_status_get_string(status));
return status;
}
+ // Assume |ExtendReadRange()| already calibrated to line up with frame boundaries.
+ ZX_DEBUG_ASSERT(read_offset == offset);
+ ZX_DEBUG_ASSERT(read_length == length);
+
return ZX_OK;
}
diff --git a/zircon/system/ulib/blobfs/compression/zstd-seekable-blob-collection.cc b/zircon/system/ulib/blobfs/compression/zstd-seekable-blob-collection.cc
index da4a5d2..5cd1ef0 100644
--- a/zircon/system/ulib/blobfs/compression/zstd-seekable-blob-collection.cc
+++ b/zircon/system/ulib/blobfs/compression/zstd-seekable-blob-collection.cc
@@ -31,6 +31,13 @@
std::unique_ptr<ZSTDSeekableBlobCollection> cbc(new ZSTDSeekableBlobCollection(
vmoid_registry, space_manager, txn_handler, node_finder, kZSTDSeekableBlobCacheSize));
+ // Initialize shared decompression stream.
+ cbc->d_stream_ = ZSTD_createDStream();
+ if (cbc->d_stream_ == nullptr) {
+ FS_TRACE_ERROR("[blobfs][compressed] Failed to create shared decompression stream\n");
+ return ZX_ERR_INTERNAL;
+ }
+
// Initialize shared transfer buffer.
zx_status_t status = zx::vmo::create(kCompressedTransferBufferBytes, 0, &cbc->transfer_vmo_);
if (status != ZX_OK) {
@@ -60,7 +67,10 @@
return ZX_OK;
}
-ZSTDSeekableBlobCollection::~ZSTDSeekableBlobCollection() { mapped_vmo_.Unmap(); }
+ZSTDSeekableBlobCollection::~ZSTDSeekableBlobCollection() {
+ mapped_vmo_.Unmap();
+ ZSTD_freeDStream(d_stream_);
+}
ZSTDSeekableBlobCollection::ZSTDSeekableBlobCollection(storage::VmoidRegistry* vmoid_registry,
SpaceManager* space_manager,
@@ -70,12 +80,13 @@
txn_handler_(txn_handler),
node_finder_(node_finder),
vmoid_(vmoid_registry),
- cache_(cache_size) {}
+ cache_(cache_size),
+ d_stream_(nullptr) {}
-zx_status_t ZSTDSeekableBlobCollection::Read(uint32_t node_index, uint8_t* buf,
- uint64_t data_byte_offset, uint64_t num_bytes) {
+zx_status_t ZSTDSeekableBlobCollection::Read(uint32_t node_index, uint8_t* buf, uint64_t buf_size,
+ uint64_t* data_byte_offset, uint64_t* num_bytes) {
InodePtr node = node_finder_->GetNode(node_index);
- if (!node) {
+ if (node == nullptr) {
FS_TRACE_ERROR("[blobfs][compressed] Invalid node index: %u\n", node_index);
return ZX_ERR_INVALID_ARGS;
}
@@ -97,8 +108,8 @@
&mapped_vmo_, &vmoid_, kCompressedTransferBufferBlocks, space_manager_, txn_handler_,
node_finder_, node_index, num_merkle_blocks);
std::unique_ptr<ZSTDSeekableBlob> new_blob;
- zx_status_t status =
- ZSTDSeekableBlob::Create(node_index, &mapped_vmo_, std::move(blocks), &new_blob);
+ zx_status_t status = ZSTDSeekableBlob::Create(node_index, d_stream_, &mapped_vmo_,
+ std::move(blocks), &new_blob);
if (status != ZX_OK) {
FS_TRACE_ERROR("[blobfs][compressed] Failed to construct ZSTDSeekableBlob: %s\n",
zx_status_get_string(status));
@@ -109,12 +120,12 @@
ZX_ASSERT(blob != nullptr);
}
- zx_status_t status = blob->Read(buf, data_byte_offset, num_bytes);
+ zx_status_t status = blob->Read(buf, buf_size, data_byte_offset, num_bytes);
if (status != ZX_OK) {
FS_TRACE_ERROR(
"[blobfs][compressed] Failed to Read from blob: node_index=%u, data_byte_offset=%lu, "
"num_bytes=%lu: %s\n",
- node_index, data_byte_offset, num_bytes, zx_status_get_string(status));
+ node_index, *data_byte_offset, *num_bytes, zx_status_get_string(status));
return status;
}
}
diff --git a/zircon/system/ulib/blobfs/compression/zstd-seekable-blob-collection.h b/zircon/system/ulib/blobfs/compression/zstd-seekable-blob-collection.h
index 58cbd96..e37f261 100644
--- a/zircon/system/ulib/blobfs/compression/zstd-seekable-blob-collection.h
+++ b/zircon/system/ulib/blobfs/compression/zstd-seekable-blob-collection.h
@@ -63,8 +63,8 @@
-  // Load exactly |num_bytes| bytes starting at _uncompressed_ file contents byte offset
-  // |data_byte_offset| from blob identified by inode index |node_index| into |buf|. The value of
-  // data in |buf| is expected to be valid if and only if the return value is |ZX_OK|.
+  // Load into |buf| (capacity |buf_size|) the whole ZSTD frames spanning |*num_bytes| bytes at
+  // _uncompressed_ byte offset |*data_byte_offset| of blob |node_index|. On success the out-params
+  // are updated to the frame-aligned range actually read; |buf| is valid if and only if |ZX_OK|.
- zx_status_t Read(uint32_t node_index, uint8_t* buf, uint64_t data_byte_offset,
- uint64_t num_bytes);
+ zx_status_t Read(uint32_t node_index, uint8_t* buf, uint64_t buf_size, uint64_t* data_byte_offset,
+ uint64_t* num_bytes);
private:
ZSTDSeekableBlobCollection(storage::VmoidRegistry* vmoid_registry, SpaceManager* space_manager,
@@ -90,6 +90,7 @@
storage::OwnedVmoid vmoid_;
fzl::VmoMapper mapped_vmo_;
ZSTDSeekableLRUBlobCache cache_;
+ ZSTD_DStream* d_stream_;
};
} // namespace blobfs
diff --git a/zircon/system/ulib/blobfs/compression/zstd-seekable-blob.cc b/zircon/system/ulib/blobfs/compression/zstd-seekable-blob.cc
index d5ce0f5..8e200bb 100644
--- a/zircon/system/ulib/blobfs/compression/zstd-seekable-blob.cc
+++ b/zircon/system/ulib/blobfs/compression/zstd-seekable-blob.cc
@@ -218,23 +218,154 @@
}
zx_status_t ZSTDSeekableBlob::Create(
- uint32_t node_index, fzl::VmoMapper* mapped_vmo,
+ uint32_t node_index, ZSTD_DStream* d_stream, fzl::VmoMapper* mapped_vmo,
std::unique_ptr<ZSTDCompressedBlockCollection> compressed_block_collection,
std::unique_ptr<ZSTDSeekableBlob>* out) {
- std::unique_ptr<ZSTDSeekableBlob> blob(
- new ZSTDSeekableBlob(node_index, mapped_vmo, std::move(compressed_block_collection)));
+ std::unique_ptr<ZSTDSeekableBlob> blob(new ZSTDSeekableBlob(
+ node_index, d_stream, mapped_vmo, std::move(compressed_block_collection)));
zx_status_t status = blob->ReadHeader();
if (status != ZX_OK) {
return status;
}
+  if ((status = blob->LoadSeekTable()) != ZX_OK) return status;
*out = std::move(blob);
return ZX_OK;
}
-zx_status_t ZSTDSeekableBlob::Read(uint8_t* buf, uint64_t data_byte_offset, uint64_t num_bytes) {
- TRACE_DURATION("blobfs", "ZSTDSeekableBlob::Read", "data byte offset", data_byte_offset,
- "num bytes", num_bytes);
+zx_status_t ZSTDSeekableBlob::Read(uint8_t* buf, uint64_t buf_size, uint64_t* data_byte_offset,
+ uint64_t* num_bytes) {
+ ZX_DEBUG_ASSERT(buf != nullptr);
+ ZX_DEBUG_ASSERT(data_byte_offset != nullptr);
+ ZX_DEBUG_ASSERT(num_bytes != nullptr);
+
+ TRACE_DURATION("blobfs", "ZSTDSeekableBlob::Read", "data byte offset", *data_byte_offset,
+ "num bytes", *num_bytes);
+
+ if (*num_bytes == 0) {
+ return ZX_OK;
+ }
+
+ size_t zstd_return = ZSTD_DCtx_reset(d_stream_, ZSTD_reset_session_only);
+ if (ZSTD_isError(zstd_return)) {
+ FS_TRACE_ERROR("[blobfs][zstd-seekable] Failed to reset decompression stream: %s\n",
+ ZSTD_getErrorName(zstd_return));
+ return ZX_ERR_INTERNAL;
+ }
+ zstd_return = ZSTD_DCtx_refDDict(d_stream_, nullptr);
+ if (ZSTD_isError(zstd_return)) {
+ FS_TRACE_ERROR(
+ "[blobfs][zstd-seekable] Failed to reset dictionary for decompression stream: %s\n",
+ ZSTD_getErrorName(zstd_return));
+ return ZX_ERR_INTERNAL;
+ }
+
+  unsigned first_frame = ZSTD_seekTable_offsetToFrameIndex(seek_table_, *data_byte_offset);
+  uint64_t uncompressed_frame_byte_start =
+      ZSTD_seekTable_getFrameDecompressedOffset(seek_table_, first_frame);
+  uint64_t compressed_frame_byte_start =
+      ZSTD_seekTable_getFrameCompressedOffset(seek_table_, first_frame);
+
+  unsigned last_frame =
+      ZSTD_seekTable_offsetToFrameIndex(seek_table_, (*data_byte_offset) + (*num_bytes) - 1);
+  uint64_t uncompressed_frame_byte_end =
+      ZSTD_seekTable_getFrameDecompressedOffset(seek_table_, last_frame) +
+      ZSTD_seekTable_getFrameDecompressedSize(seek_table_, last_frame);
+  uint64_t compressed_frame_byte_end =
+      ZSTD_seekTable_getFrameCompressedOffset(seek_table_, last_frame) +
+      ZSTD_seekTable_getFrameCompressedSize(seek_table_, last_frame);
+
+  if (uncompressed_frame_byte_end <= uncompressed_frame_byte_start) {
+    FS_TRACE_ERROR("[blobfs][zstd-seekable] End block overflow\n");
+    return ZX_ERR_OUT_OF_RANGE;
+  }
+
+  uint64_t uncompressed_frame_byte_size =
+      uncompressed_frame_byte_end - uncompressed_frame_byte_start;
+  if (buf_size < uncompressed_frame_byte_size) {
+    FS_TRACE_ERROR("[blobfs][zstd-seekable] Uncompressed output buffer too small: %lu < %lu\n",
+                   buf_size, uncompressed_frame_byte_size);
+    return ZX_ERR_BUFFER_TOO_SMALL;
+  }
+
+  // ZSTD Seekable blob data contains: [header][zstd-seekable-archive].
+  uint64_t blob_byte_start = kZSTDSeekableHeaderSize + compressed_frame_byte_start;
+  uint64_t blob_byte_end = kZSTDSeekableHeaderSize + compressed_frame_byte_end;
+  uint64_t blob_block_byte_offset = fbl::round_down(blob_byte_start, kBlobfsBlockSize);
+  uint64_t blob_block_offset64 = blob_block_byte_offset / kBlobfsBlockSize;
+  if (blob_block_offset64 > std::numeric_limits<uint32_t>::max()) {
+    FS_TRACE_ERROR("[blobfs][zstd-seekable] Start block overflow\n");
+    return ZX_ERR_OUT_OF_RANGE;
+  }
+  uint32_t blob_block_offset = static_cast<uint32_t>(blob_block_offset64);
+
+  uint64_t blob_block_end = fbl::round_up(blob_byte_end, kBlobfsBlockSize) / kBlobfsBlockSize;
+  if (blob_block_end <= blob_block_offset64) {
+    FS_TRACE_ERROR("[blobfs][zstd-seekable] End block overflow\n");
+    return ZX_ERR_OUT_OF_RANGE;
+  }
+
+  uint64_t num_blocks64 = blob_block_end - blob_block_offset64;
+  if (num_blocks64 > std::numeric_limits<uint32_t>::max()) {
+    FS_TRACE_ERROR("[blobfs][zstd-seekable] Number of blocks overflow\n");
+    return ZX_ERR_OUT_OF_RANGE;
+  }
+  uint32_t num_blocks = static_cast<uint32_t>(num_blocks64);
+
+ zx_status_t status = compressed_block_collection_->Read(blob_block_offset, num_blocks);
+ if (status != ZX_OK) {
+ FS_TRACE_ERROR("[blobfs][zstd-seekable] Failed to read from compressed block collection: %s\n",
+ zx_status_get_string(status));
+ return status;
+ }
+
+  ZSTD_inBuffer compressed_buf = ZSTD_inBuffer{
+      .src = mapped_vmo_->start(),
+      .size = blob_byte_end - blob_block_byte_offset,
+      .pos = blob_byte_start - blob_block_byte_offset,
+  };
+ ZSTD_outBuffer uncompressed_buf = ZSTD_outBuffer{
+ .dst = buf,
+ .size = uncompressed_frame_byte_size,
+ .pos = 0,
+ };
+
+ size_t prev_output_pos = 0;
+ do {
+ prev_output_pos = uncompressed_buf.pos;
+ zstd_return = ZSTD_decompressStream(d_stream_, &uncompressed_buf, &compressed_buf);
+ if (ZSTD_isError(zstd_return)) {
+ FS_TRACE_ERROR("[blobfs][zstd-seekable] Failed to decompress: %s\n",
+ ZSTD_getErrorName(zstd_return));
+ return ZX_ERR_INTERNAL;
+ }
+ } while (uncompressed_buf.pos < uncompressed_buf.size && prev_output_pos != uncompressed_buf.pos);
+ if (uncompressed_buf.pos < uncompressed_buf.size) {
+ FS_TRACE_ERROR(
+ "[blobfs][zstd-seekable] Decompression stopped making progress before decompressing all "
+ "bytes\n");
+ return ZX_ERR_INTERNAL;
+ }
+
+ *data_byte_offset = uncompressed_frame_byte_start;
+ *num_bytes = uncompressed_frame_byte_size;
+ return ZX_OK;
+}
+
+ZSTDSeekableBlob::ZSTDSeekableBlob(
+ uint32_t node_index, ZSTD_DStream* d_stream, fzl::VmoMapper* mapped_vmo,
+ std::unique_ptr<ZSTDCompressedBlockCollection> compressed_block_collection)
+ : node_index_(node_index),
+ mapped_vmo_(mapped_vmo),
+ compressed_block_collection_(std::move(compressed_block_collection)),
+ seek_table_(nullptr),
+ d_stream_(d_stream) {}
+
+zx_status_t ZSTDSeekableBlob::LoadSeekTable() {
+ zx_status_t status = ReadHeader();
+ if (status != ZX_OK) {
+ return status;
+ }
ZSTD_seekable* d_stream = ZSTD_seekable_create();
if (d_stream == nullptr) {
@@ -262,42 +393,16 @@
return ZX_ERR_INTERNAL;
}
- size_t decompressed = 0;
- do {
- zstd_return =
- ZSTD_seekable_decompress(d_stream, buf, num_bytes, data_byte_offset + decompressed);
- if (ZSTD_isError(zstd_return)) {
- FS_TRACE_ERROR("[blobfs][zstd-seekable] Failed to decompress: %s\n",
- ZSTD_getErrorName(zstd_return));
- if (zstd_seekable_file.status != ZX_OK) {
- return zstd_seekable_file.status;
- }
-
- return ZX_ERR_IO_DATA_INTEGRITY;
- }
-
- // Non-error case: |ZSTD_seekable_decompress| returns number of bytes decompressed.
- decompressed += zstd_return;
-
- // From the ZSTD_seekable_decompress Documentation:
- // The return value is the number of bytes decompressed, or an error code checkable with
- // ZSTD_isError().
- // Assume that a return value of 0 indicates, not only that 0 bytes were decompressed, but also
- // that there are no more bytes to decompress.
- } while (zstd_return > 0 && decompressed < num_bytes);
-
- // TODO(markdittmer): Perform verification over block-aligned data that was read.
+  zstd_return = ZSTD_seekable_copySeekTable(d_stream, &seek_table_);
+  ZSTD_seekable_free(d_stream);
+  if (ZSTD_isError(zstd_return)) {
+    FS_TRACE_ERROR("[blobfs][zstd-seekable] Failed to initialize seek table: %s\n", ZSTD_getErrorName(zstd_return));
+    return ZX_ERR_INTERNAL;
+  }
return ZX_OK;
}
-ZSTDSeekableBlob::ZSTDSeekableBlob(
- uint32_t node_index, fzl::VmoMapper* mapped_vmo,
- std::unique_ptr<ZSTDCompressedBlockCollection> compressed_block_collection)
- : node_index_(node_index),
- mapped_vmo_(mapped_vmo),
- compressed_block_collection_(std::move(compressed_block_collection)) {}
-
zx_status_t ZSTDSeekableBlob::ReadHeader() {
// The header is an internal BlobFS data structure that fits into one block.
static_assert(kZSTDSeekableHeaderSize <= kBlobfsBlockSize);
diff --git a/zircon/system/ulib/blobfs/compression/zstd-seekable-blob.h b/zircon/system/ulib/blobfs/compression/zstd-seekable-blob.h
index b7f5f2d..b8dd9f5 100644
--- a/zircon/system/ulib/blobfs/compression/zstd-seekable-blob.h
+++ b/zircon/system/ulib/blobfs/compression/zstd-seekable-blob.h
@@ -33,7 +33,8 @@
-  // Load into |buf| exactly |num_bytes| bytes starting at _uncompressed_ file contents byte offset
-  // |data_byte_offset|. The value of data in |buf| is expected to be valid if and only if the
-  // return value is |ZX_OK|.
+  // Load into |buf| (capacity |buf_size|) the whole ZSTD frames that span |*num_bytes| bytes at
+  // _uncompressed_ byte offset |*data_byte_offset|. On success, the out-params are updated to the
+  // frame-aligned range actually read. |buf| is valid if and only if the return value is |ZX_OK|.
- virtual zx_status_t Read(uint8_t* buf, uint64_t data_byte_offset, uint64_t num_bytes) = 0;
+ virtual zx_status_t Read(uint8_t* buf, uint64_t buf_size, uint64_t* data_byte_offset,
+ uint64_t* num_bytes) = 0;
};
// ZSTDSeekableBlob as an implementation of |RandomAccessCompressedBlob| that uses the ZSTD Seekable
@@ -45,12 +46,13 @@
// Create a |ZSTDSeekableBlob|. It is the invoker's responsibility to ensure that the VMO
// populated on |compressed_block_collection.Read()| corresponds to |mapped_vmo|.
static zx_status_t Create(
- uint32_t node_index, fzl::VmoMapper* mapped_vmo,
+ uint32_t node_index, ZSTD_DStream* d_stream, fzl::VmoMapper* mapped_vmo,
std::unique_ptr<ZSTDCompressedBlockCollection> compressed_block_collection,
std::unique_ptr<ZSTDSeekableBlob>* out);
// RandomAccessCompressedBlob implementation.
- zx_status_t Read(uint8_t* buf, uint64_t data_byte_offset, uint64_t num_bytes) final;
+ zx_status_t Read(uint8_t* buf, uint64_t buf_size, uint64_t* data_byte_offset,
+ uint64_t* num_bytes) final;
const uint8_t* compressed_data_start() const {
return static_cast<uint8_t*>(mapped_vmo_->start());
@@ -59,15 +61,18 @@
uint32_t node_index() { return node_index_; }
private:
- ZSTDSeekableBlob(uint32_t node_index, fzl::VmoMapper* mapped_vmo,
+ ZSTDSeekableBlob(uint32_t node_index, ZSTD_DStream* d_stream, fzl::VmoMapper* mapped_vmo,
std::unique_ptr<ZSTDCompressedBlockCollection> compressed_block_collection);
+ zx_status_t LoadSeekTable();
zx_status_t ReadHeader();
uint32_t node_index_;
ZSTDSeekableHeader header_;
fzl::VmoMapper* mapped_vmo_;
std::unique_ptr<ZSTDCompressedBlockCollection> compressed_block_collection_;
+  ZSTD_seekTable* seek_table_;  // NOTE(review): copied table is never freed in this CL — release it (e.g. ZSTD_seekTable_free) in a destructor; TODO confirm API name.
+ ZSTD_DStream* d_stream_;
};
// Type used for opaque pointer in ZSTD Seekable custom |ZSTD_seekable_seek| and
diff --git a/zircon/system/ulib/blobfs/test/unit/zstd-seekable-blob-test.cc b/zircon/system/ulib/blobfs/test/unit/zstd-seekable-blob-test.cc
index 8c094b5..979baf3 100644
--- a/zircon/system/ulib/blobfs/test/unit/zstd-seekable-blob-test.cc
+++ b/zircon/system/ulib/blobfs/test/unit/zstd-seekable-blob-test.cc
@@ -80,8 +80,10 @@
void CheckRead(uint32_t node_index, std::vector<uint8_t>* buf, std::vector<uint8_t>* expected_buf,
uint64_t data_byte_offset, uint64_t num_bytes) {
uint8_t* expected = expected_buf->data() + data_byte_offset;
+ uint64_t offset = data_byte_offset;
+ uint64_t length = num_bytes;
ASSERT_OK(
- compressed_blob_collection()->Read(node_index, buf->data(), data_byte_offset, num_bytes));
+ compressed_blob_collection()->Read(node_index, buf->data(), buf->size(), &offset, &length));
ASSERT_BYTES_EQ(expected, buf->data(), num_bytes);
}
@@ -246,7 +248,10 @@
auto blocks_for_file = blocks.get();
std::unique_ptr<ZSTDSeekableBlob> blob;
- ASSERT_OK(ZSTDSeekableBlob::Create(node_index, &mapper, std::move(blocks), &blob));
+ ZSTD_DStream* d_stream = ZSTD_createDStream();
+ ASSERT_NOT_NULL(d_stream);
+ auto clean_up = fbl::MakeAutoCall([&]() { ZSTD_freeDStream(d_stream); });
+ ASSERT_OK(ZSTDSeekableBlob::Create(node_index, d_stream, &mapper, std::move(blocks), &blob));
ZSTDSeekableFile file = ZSTDSeekableFile{
.blob = blob.get(),
@@ -275,7 +280,10 @@
std::vector<uint8_t> buf(blob_info->size_data);
std::vector<uint8_t> expected(blob_info->size_data);
ZeroToSevenBlobSrcFunction(reinterpret_cast<char*>(expected.data()), blob_info->size_data);
- ASSERT_OK(compressed_blob_collection()->Read(node_index, buf.data(), 0, blob_info->size_data));
+ uint64_t offset = 0;
+ uint64_t length = blob_info->size_data;
+ ASSERT_OK(
+ compressed_blob_collection()->Read(node_index, buf.data(), buf.size(), &offset, &length));
ASSERT_BYTES_EQ(expected.data(), buf.data(), blob_info->size_data);
}
@@ -338,8 +346,10 @@
// Attempt to read one byte passed the end of the blob.
std::vector<uint8_t> buf(1);
- ASSERT_EQ(ZX_ERR_IO_DATA_INTEGRITY,
- compressed_blob_collection()->Read(node_index, buf.data(), blob_info->size_data, 1));
+ uint64_t offset = blob_info->size_data;
+ uint64_t length = 1;
+ ASSERT_EQ(ZX_ERR_IO_DATA_INTEGRITY, compressed_blob_collection()->Read(
+ node_index, buf.data(), buf.size(), &offset, &length));
}
TEST_F(ZSTDSeekableBlobTest, BadSize) {
@@ -349,15 +359,20 @@
// Attempt to read two bytes: the last byte in the blob, and one byte passed the end.
std::vector<uint8_t> buf(2);
+ uint64_t offset = blob_info->size_data - 1;
+ uint64_t length = 2;
ASSERT_EQ(ZX_ERR_IO_DATA_INTEGRITY, compressed_blob_collection()->Read(
- node_index, buf.data(), blob_info->size_data - 1, 2));
+ node_index, buf.data(), buf.size(), &offset, &length));
}
TEST_F(ZSTDSeekableBlobNullNodeFinderTest, BadNode) {
std::vector<uint8_t> buf(1);
// Attempt to read a byte from a node that doesn't exist.
- ASSERT_EQ(ZX_ERR_INVALID_ARGS, compressed_blob_collection()->Read(42, buf.data(), 0, 1));
+ uint64_t offset = 0;
+ uint64_t length = 1;
+ ASSERT_EQ(ZX_ERR_INVALID_ARGS,
+ compressed_blob_collection()->Read(42, buf.data(), buf.size(), &offset, &length));
}
TEST_F(ZSTDSeekableBlobWrongAlgorithmTest, BadFlags) {
@@ -367,7 +382,10 @@
std::vector<uint8_t> buf(1);
// Attempt to read a byte from a blob that is not zstd-seekable.
- ASSERT_EQ(ZX_ERR_NOT_SUPPORTED, compressed_blob_collection()->Read(node_index, buf.data(), 0, 1));
+ uint64_t offset = 0;
+ uint64_t length = 1;
+ ASSERT_EQ(ZX_ERR_NOT_SUPPORTED, compressed_blob_collection()->Read(node_index, buf.data(),
+ buf.size(), &offset, &length));
}
} // namespace