Fix number-of-blocks calculation
The number of blocks was previously computed from the raw number of
bytes being read, not from the block-aligned start/end offsets — which
determine which blocks, and therefore how many blocks, are read.
Change-Id: Ib62a0e84b39f1e94a8b53eaf393b07023156a735
diff --git a/zircon/system/ulib/blobfs/compression/zstd-seekable-blob.cc b/zircon/system/ulib/blobfs/compression/zstd-seekable-blob.cc
index bc11023..af2818fc 100644
--- a/zircon/system/ulib/blobfs/compression/zstd-seekable-blob.cc
+++ b/zircon/system/ulib/blobfs/compression/zstd-seekable-blob.cc
@@ -45,6 +45,12 @@
TRACE_DURATION("blobfs", "ZSTDRead", "byte_offset", file->byte_offset, "bytes", num_bytes);
+ if (num_bytes == 0) {
+ FS_TRACE_ERROR("[blobfs][zstd-seekable] Attempt to read 0 bytes\n");
+ file->status = ZX_ERR_OUT_OF_RANGE;
+ return -1;
+ }
+
// |file->byte_offset| does not account for ZSTD seekable header.
uint64_t data_byte_offset;
if (add_overflow(kZSTDSeekableHeaderSize, file->byte_offset, &data_byte_offset)) {
@@ -55,28 +61,40 @@
}
// Safely convert units: Bytes to blocks.
- uint64_t data_block_offset64 = data_byte_offset / kBlobfsBlockSize;
- if (data_block_offset64 > std::numeric_limits<uint32_t>::max()) {
- FS_TRACE_ERROR("[blobfs][zstd-seekable] Oversized data block offset: %lu / %u = %lu > %u\n",
- data_byte_offset, kBlobfsBlockSize, data_block_offset64,
+ uint64_t data_block_start64 = data_byte_offset / kBlobfsBlockSize;
+ if (data_block_start64 > std::numeric_limits<uint32_t>::max()) {
+ FS_TRACE_ERROR("[blobfs][zstd-seekable] Oversized data block start: %lu / %u = %lu > %u\n",
+ data_byte_offset, kBlobfsBlockSize, data_block_start64,
std::numeric_limits<uint32_t>::max());
file->status = ZX_ERR_OUT_OF_RANGE;
return -1;
}
- uint32_t data_block_offset = static_cast<uint32_t>(data_block_offset64);
- uint64_t num_blocks64 = fbl::round_up(num_bytes, kBlobfsBlockSize) / kBlobfsBlockSize;
+ uint32_t data_block_start = static_cast<uint32_t>(data_block_start64);
+ uint64_t data_byte_end;
+ if (add_overflow(data_byte_offset, num_bytes, &data_byte_end)) {
+ FS_TRACE_ERROR("[blobfs][zstd-seekable] Oversized data block end: data_byte_offset=%lu, num_bytes=%lu\n",
+ data_byte_offset, num_bytes);
+ file->status = ZX_ERR_OUT_OF_RANGE;
+ return -1;
+ }
+ uint64_t data_block_end64 = fbl::round_up(data_byte_end, kBlobfsBlockSize) / kBlobfsBlockSize;
+ uint64_t num_blocks64;
+ if (sub_overflow(data_block_end64, data_block_start64, &num_blocks64)) {
+ FS_TRACE_ERROR("[blobfs][zstd-seekable] Block calculation error: (data_block_end=%lu - data_block_start=%lu) should be non-negative\n",
+ data_block_end64, data_block_start64);
+ file->status = ZX_ERR_INTERNAL;
+ return -1;
+ }
if (num_blocks64 > std::numeric_limits<uint32_t>::max()) {
- FS_TRACE_ERROR(
- "[blobfs][zstd-seekable] Oversized number of blocks: round_up(%lu, %u) / %u = %lu > %u\n",
- num_bytes, kBlobfsBlockSize, kBlobfsBlockSize, num_blocks64,
- std::numeric_limits<uint32_t>::max());
+ FS_TRACE_ERROR("[blobfs][zstd-seekable] Oversized number of blocks: %lu > %u\n",
+ num_blocks64, std::numeric_limits<uint32_t>::max());
file->status = ZX_ERR_OUT_OF_RANGE;
return -1;
}
uint32_t num_blocks = static_cast<uint32_t>(num_blocks64);
// Delegate block-level read to compressed block collection.
- zx_status_t status = file->blocks->Read(data_block_offset, num_blocks);
+ zx_status_t status = file->blocks->Read(data_block_start, num_blocks);
if (status != ZX_OK) {
FS_TRACE_ERROR("[blobfs][zstd-seekable] Failed to read blocks: %s\n",
zx_status_get_string(status));
diff --git a/zircon/system/ulib/blobfs/test/unit/zstd-seekable-blob-test.cc b/zircon/system/ulib/blobfs/test/unit/zstd-seekable-blob-test.cc
index 5423320..f7d9c43 100644
--- a/zircon/system/ulib/blobfs/test/unit/zstd-seekable-blob-test.cc
+++ b/zircon/system/ulib/blobfs/test/unit/zstd-seekable-blob-test.cc
@@ -49,7 +49,10 @@
const uint32_t kNumFilesystemBlocks = 4000;
+constexpr unsigned kZeroToThirtyTwoAndRandomBlobSrcFunctionRandomSeed = 9572331;
+
void ZeroToThirtyTwoAndRandomBlobSrcFunction(char* data, size_t length) {
+ srand(kZeroToThirtyTwoAndRandomBlobSrcFunctionRandomSeed);
for (size_t i = 0; i < length; i++) {
if ((i / 32) % 2 == 0) {
uint8_t value = static_cast<uint8_t>(i % 32);
@@ -74,6 +77,24 @@
void SetDeviceOwner(std::string name) { gDeviceOwner = name; }
+
+constexpr size_t gLoggingBytesPerLine = 64;
+
+void LogBuf(std::string name, const std::vector<uint8_t>& buf) {
+ cerr << "BUF(" << name << ") :: " << buf.size();
+
+
+ for (size_t i = 0; i < buf.size(); i++) {
+ if ((i % gLoggingBytesPerLine) == 0) {
+ cerr << endl << "BUF(" << name << ") ";
+ fprintf(stderr, "%10lu", i);
+ cerr << " >> ";
+ }
+ fprintf(stderr, "%02X", buf[i]);
+ }
+ cerr << endl;
+}
+
class LoggingBlockDevice : public block_client::BlockDevice {
public:
LoggingBlockDevice() = delete;
@@ -285,7 +306,17 @@
std::vector<uint8_t> expected(blob_info->size_data);
ZeroToThirtyTwoAndRandomBlobSrcFunction(reinterpret_cast<char*>(expected.data()), blob_info->size_data);
ASSERT_OK(compressed_blob_collection()->Read(node_index, buf.data(), 0, blob_info->size_data));
- ASSERT_BYTES_EQ(expected.data(), buf.data(), blob_info->size_data);
+
+ LogBuf("EXPECTED", expected);
+ LogBuf("WAS_READ", buf);
+
+ const size_t incrementSize = kBlobfsBlockSize / 4;
+ for (size_t i = 0; i < blob_info->size_data; i += incrementSize) {
+ EXPECT_BYTES_EQ(&(expected.data()[i]), &(buf.data()[i]), std::min(i + incrementSize, blob_info->size_data) - i);
+ }
+ // Note: Data size is too large for stack allocation in |ASSERT_BYTES_EQ()|. That's why
+ // |memcmp()| is used instead.
+ // ASSERT_EQ(0, memcmp(expected.data(), buf.data(), blob_info->size_data));
}
// DisableZSTDReadLogging();