// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <string.h>
#include <zircon/errors.h>
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <memory>
#include <mutex>
#include <optional>
#include <utility>
#include <gtest/gtest.h>
#include "src/devices/block/drivers/ftl/tests/ftl-shell.h"
#include "src/devices/block/drivers/ftl/tests/ndm-ram-driver.h"
#include "src/storage/lib/ftl/ftln/ftlnp.h"
#include "src/storage/lib/ftl/ftln/ndm-driver.h"
#include "src/storage/lib/ftl/ftln/volume.h"
namespace {
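
// Sentinel value the FTL uses to mark a page as unmapped.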
constexpr uint32_t kInvalidPage = static_cast<uint32_t>(-1);
constexpr uint32_t kSpareSize = 16;
constexpr uint32_t kPageSize = 4096;
constexpr uint32_t kPagesPerBlock = 64;
// 50 blocks means 3200 pages, which is enough to have several map pages.
constexpr ftl::VolumeOptions kDefaultOptions = {.num_blocks = 50,
.max_bad_blocks = 2,
.block_size = kPageSize * kPagesPerBlock,
.page_size = kPageSize,
.eb_size = kSpareSize,
.flags = 0};
// Don't sprinkle in errors by default.
constexpr TestOptions kBoringTestOptions = {
.ecc_error_interval = -1,
.bad_block_interval = -1,
.bad_block_burst = 0,
.use_half_size = false,
.save_config_data = true,
.power_failure_delay = -1,
.emulate_half_write_on_power_failure = false,
.ftl_logger = std::nullopt,
};
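
// These tests exercise how FtlnIncompleteWrite() interprets the spare area: a page whose spare
// carries a valid validity marker is considered fully written, while a page without one is judged
// by its wear count bytes (an all-0xFF wear count looks like an interrupted write).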
TEST(FtlTest, IncompleteWriteWithValidity) {
uint8_t spare[kSpareSize];
memset(spare, 0xff, kSpareSize);
FtlnSetSpareValidity(spare);
ASSERT_FALSE(FtlnIncompleteWrite(spare));
}

TEST(FtlTest, IncompleteWriteWithBadValidity) {
uint8_t spare[kSpareSize];
memset(spare, 0xff, kSpareSize);
spare[14] = 0;
ASSERT_TRUE(FtlnIncompleteWrite(spare));
}

TEST(FtlTest, IncompleteWriteNoValidityBadWearCount) {
uint8_t spare[kSpareSize];
memset(spare, 0xff, kSpareSize);
ASSERT_TRUE(FtlnIncompleteWrite(spare));
}

TEST(FtlTest, IncompleteWriteNoValidityGoodWearCount) {
uint8_t spare[kSpareSize];
memset(spare, 0xff, kSpareSize);
spare[10] = 0;
ASSERT_FALSE(FtlnIncompleteWrite(spare));
}
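
// Basic round trip: data written and flushed should survive a remount.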
TEST(FtlTest, WriteRemountRead) {
FtlShell ftl;
ASSERT_TRUE(ftl.Init(kDefaultOptions));
ftl::Volume* volume = ftl.volume();
uint8_t buf[kPageSize];
memcpy(buf, "abc123", 6);
ASSERT_EQ(ZX_OK, volume->Write(1, 1, buf));
ASSERT_EQ(ZX_OK, volume->Flush());
ASSERT_EQ(nullptr, volume->ReAttach());
uint8_t buf2[kPageSize];
ASSERT_EQ(ZX_OK, volume->Read(1, 1, buf2));
ASSERT_EQ(0, memcmp(buf, buf2, kPageSize));
}

// Test power cuts during a map block transfer.
TEST(FtlTest, PowerCutOnBlockTransfer) {
FtlShell ftl_shell;
auto driver_owned = std::make_unique<NdmRamDriver>(kDefaultOptions, kBoringTestOptions);
ASSERT_EQ(nullptr, driver_owned->Init());
NdmRamDriver* driver_unowned = driver_owned.get();
ASSERT_TRUE(ftl_shell.InitWithDriver(std::move(driver_owned)));
ftl::Volume* volume = ftl_shell.volume();
// Do a normal write + flush.
uint8_t buf[kPageSize];
memcpy(buf, "abc123", 6);
ASSERT_EQ(ZX_OK, volume->Write(0, 1, buf));
ASSERT_EQ(ZX_OK, volume->Flush());
// Get the physical page number where the map page was just written.
FTLN ftl = reinterpret_cast<FTLN>(
reinterpret_cast<ftl::VolumeImpl*>(volume)->GetInternalVolumeForTest());
uint32_t phys_map_page = ftl->mpns[0];
// Verify that it is not unmapped.
ASSERT_NE(0xFFFFFFFFu, phys_map_page);
// Test increasingly delayed power cuts until the transfer completes.
uint32_t new_phys_map_page = phys_map_page;
int power_cut_delay = -1;
while (new_phys_map_page == phys_map_page) {
driver_unowned->SetPowerFailureDelay(++power_cut_delay);
// This is expected to fail many times.
FtlnRecycleMapBlk(ftl, phys_map_page / kPagesPerBlock);
// Re-enable power.
driver_unowned->SetPowerFailureDelay(-1);
// Reattach and grab new ftln and new location of the map page.
ASSERT_EQ(nullptr, volume->ReAttach());
ftl = reinterpret_cast<FTLN>(
reinterpret_cast<ftl::VolumeImpl*>(volume)->GetInternalVolumeForTest());
new_phys_map_page = ftl->mpns[0];
// Verify that it is not unmapped.
ASSERT_NE(0xFFFFFFFFu, new_phys_map_page);
}
// This should never succeed on the first try, since a delay of zero prevents any reads or writes.
ASSERT_LT(0, power_cut_delay);
}

// Poor ECC results in block migration due only to reads.
TEST(FtlTest, MigrateOnDangerousEcc) {
FtlShell ftl_shell;
auto driver_owned = std::make_unique<NdmRamDriver>(kDefaultOptions, kBoringTestOptions);
ASSERT_EQ(nullptr, driver_owned->Init());
NdmRamDriver* driver_unowned = driver_owned.get();
ASSERT_TRUE(ftl_shell.InitWithDriver(std::move(driver_owned)));
ftl::Volume* volume = ftl_shell.volume();
// Do a normal write for an entire volume block.
uint8_t buf[kPageSize];
for (uint32_t i = 0; i < kPagesPerBlock; ++i) {
memcpy(buf, &i, sizeof(uint32_t));
ASSERT_EQ(ZX_OK, volume->Write(i, 1, buf));
}
// Recreate the original page in the buffer for later comparisons.
{
uint32_t tmp = 0;
memcpy(buf, &tmp, sizeof(uint32_t));
}
// The next write should be in a different volume block than the first write.
uint8_t buf2[kPageSize];
memcpy(buf2, "xzy789", 6);
ASSERT_EQ(ZX_OK, volume->Write(kPagesPerBlock, 1, buf2));
// Flush it all to disk.
ASSERT_EQ(ZX_OK, volume->Flush());
// Check the current location of the first written page. This is only reliable because the test
// options have disabled simulating random bad blocks.
FTLN ftl = reinterpret_cast<FTLN>(
reinterpret_cast<ftl::VolumeImpl*>(volume)->GetInternalVolumeForTest());
uint32_t phys_page = -1;
ASSERT_EQ(0, FtlnMapGetPpn(ftl, 0, &phys_page));
ASSERT_NE(-1u, phys_page);
// Set it to have poor ECC and read it back to flag the need for recycle.
driver_unowned->SetUnsafeEcc(phys_page, true);
ASSERT_EQ(ZX_OK, volume->Read(0, 1, buf2));
ASSERT_EQ(0, memcmp(buf, buf2, kPageSize));
// Nothing has changed (yet).
uint32_t new_phys_page = -1;
ASSERT_EQ(0, FtlnMapGetPpn(ftl, 0, &new_phys_page));
ASSERT_EQ(phys_page, new_phys_page);
// Any read or write should trigger a recycle here on the block that needs it. So read something
// completely unrelated in a different block.
ASSERT_EQ(ZX_OK, volume->Read(kPagesPerBlock, 1, buf2));
// Check the new location of page 0 as it should have migrated.
ASSERT_EQ(0, FtlnMapGetPpn(ftl, 0, &new_phys_page));
ASSERT_NE(phys_page, new_phys_page);
// Verify it is still intact.
ASSERT_EQ(ZX_OK, volume->Read(0, 1, buf2));
ASSERT_EQ(0, memcmp(buf, buf2, kPageSize));
}

// Simulate a page that is partially written on an ECC boundary, allowing it to appear valid. This
// shouldn't be a real scenario that matters, except that we use the OobDoubler class, which masks
// this possibility from both the upper and lower layers.
TEST(FtlTest, PartialPageWriteRecovery) {
FtlShell ftl_shell;
auto driver_owned = std::make_unique<NdmRamDriver>(kDefaultOptions, kBoringTestOptions);
ASSERT_EQ(nullptr, driver_owned->Init());
NdmRamDriver* driver_unowned = driver_owned.get();
ASSERT_TRUE(ftl_shell.InitWithDriver(std::move(driver_owned)));
ftl::Volume* volume = ftl_shell.volume();
// Write some data to the tail end of a map page.
uint8_t buf[kPageSize];
memcpy(buf, "abc123", 6);
uint32_t page;
FTLN ftl = reinterpret_cast<FTLN>(
reinterpret_cast<ftl::VolumeImpl*>(volume)->GetInternalVolumeForTest());
page = ftl->mappings_per_mpg - 1;
ASSERT_EQ(ZX_OK, volume->Write(page, 1, buf));
ASSERT_EQ(ZX_OK, volume->Flush());
// Write some data to another page indexed by the same map page.
uint8_t buf2[kPageSize];
memcpy(buf2, "xyz789", 6);
ASSERT_EQ(ZX_OK, volume->Write(0, 1, buf2));
ASSERT_EQ(ZX_OK, volume->Flush());
// Find the physical location of this map page and erase the tail end of it, including the spare,
// since the spare is normally spread across the ECC pages of the NAND. This should throw away the
// other written page, as we're simulating an incomplete write of the page.
memset(&driver_unowned->MainData(ftl->mpns[0])[kPageSize / 2], -1, kPageSize / 2);
memset(&driver_unowned->SpareData(ftl->mpns[0])[kSpareSize / 2], -1, kSpareSize / 2);
// Remount with the corruption.
volume->ReAttach();
// Verify the original page is intact.
ASSERT_EQ(ZX_OK, volume->Read(page, 1, buf2));
ASSERT_EQ(0, memcmp(buf, buf2, kPageSize));
// We should have lost the second write's flush and expect erased data for the other page.
ASSERT_EQ(ZX_OK, volume->Read(0, 1, buf2));
for (unsigned char& c : buf2) {
ASSERT_EQ(0xFFu, c);
}
}
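
// Toggling the new wear-leveling algorithm takes effect immediately but does not persist across a
// remount.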
TEST(FtlTest, EnableNewWearLeveling) {
FtlShell ftl_shell;
auto driver_owned = std::make_unique<NdmRamDriver>(kDefaultOptions, kBoringTestOptions);
ASSERT_EQ(nullptr, driver_owned->Init());
ASSERT_TRUE(ftl_shell.InitWithDriver(std::move(driver_owned)));
ftl::Volume* volume = ftl_shell.volume();
ftl::VolumeImpl* volume_impl = reinterpret_cast<ftl::VolumeImpl*>(volume);
bool state;
volume_impl->GetNewWearLeveling(&state);
ASSERT_FALSE(state);
volume_impl->SetNewWearLeveling(true);
volume_impl->GetNewWearLeveling(&state);
ASSERT_TRUE(state);
// Remount to see that the status has reset.
volume->ReAttach();
volume_impl->GetNewWearLeveling(&state);
ASSERT_FALSE(state);
// Now enable it.
volume_impl->SetNewWearLeveling(true);
volume_impl->GetNewWearLeveling(&state);
ASSERT_TRUE(state);
}

// Demonstrate how ECC failures part way through a map block can lead to permanent data loss due to
// preemptive erasure, during initialization, of map blocks that appear to hold no current mappings.
//
// We set up the FTL such that Map Block 0 = [mpn0, mpn1, mpn0, mpn1 ...] and Map Block 1 = [mpn0].
// We then set an ECC failure on the second page in map block 0 (which holds mpn1), causing
// build_map to stop processing map block 0 having seen only mpn0 at that point. Once map block 1 is
// processed, there are no current mappings in map block 0 from the FTL's perspective, and thus it
// is preemptively erased in `init_ftln` after `build_map` returns.
TEST(FtlTest, MapPageEccFailure) {
FtlShell ftl_shell;
auto driver_owned = std::make_unique<NdmRamDriver>(kDefaultOptions, kBoringTestOptions);
ASSERT_EQ(nullptr, driver_owned->Init());
NdmRamDriver* driver_unowned = driver_owned.get();
ASSERT_TRUE(ftl_shell.InitWithDriver(std::move(driver_owned)));
ftl::Volume* volume = ftl_shell.volume();
FTLN ftl = reinterpret_cast<FTLN>(
reinterpret_cast<ftl::VolumeImpl*>(volume)->GetInternalVolumeForTest());
uint8_t buf[kPageSize];
memcpy(buf, "abc123", 6);
constexpr uint32_t kMappingsPerMpn = kPageSize / 4;
// 1. Consume the first map block by writing out kPagesPerBlock pages to pages spanning MPN0/1.
// 2. Consume first page of a new map block by writing to MPN0 (ensure the VPN is one
// that we wrote in the first map block).
uint32_t first_block_mpn0 = kInvalidPage;
// Write out kPagesPerBlock + 1 pages to alternating map pages so that we consume 2 map blocks.
for (uint32_t page = 0; page < (kPagesPerBlock + 1); ++page) {
// Alternate writing to page 0/kMappingsPerMpn, which will update MPNs 0/1 respectively.
ASSERT_EQ(ZX_OK, volume->Write((page % 2) ? kMappingsPerMpn : 0, 1, buf));
ASSERT_EQ(ZX_OK, volume->Flush());
if (page == 0) {
// Ensure mpns[0] is now valid, and store the block it's in.
ASSERT_NE(ftl->mpns[0], kInvalidPage);
first_block_mpn0 = ftl->mpns[0] / kPagesPerBlock;
}
}
uint32_t phys_page0_old = kInvalidPage;
ASSERT_EQ(0, FtlnMapGetPpn(ftl, 0, &phys_page0_old));
ASSERT_NE(phys_page0_old, kInvalidPage);
uint32_t phys_page1_old = kInvalidPage;
ASSERT_EQ(0, FtlnMapGetPpn(ftl, kMappingsPerMpn, &phys_page1_old));
ASSERT_NE(phys_page1_old, kInvalidPage);
// Now we simulate the 2nd page in the first map block going bad.
// This should result in that physical block being erased when we re-mount.
driver_unowned->SetFailEcc((first_block_mpn0 * kPagesPerBlock) + 1, true);
// Remount with the corruption.
volume->ReAttach();
ftl = reinterpret_cast<FTLN>(
reinterpret_cast<ftl::VolumeImpl*>(volume)->GetInternalVolumeForTest());
// We should expect first_block_mpn0 to now be erased.
ASSERT_TRUE(IS_FREE(ftl->bdata[first_block_mpn0]));
// At this point we've effectively lost all mappings in MPN1 but still have MPN0.
ASSERT_NE(ftl->mpns[0], kInvalidPage);
uint32_t phys_page0_new = kInvalidPage;
ASSERT_EQ(0, FtlnMapGetPpn(ftl, 0, &phys_page0_new));
ASSERT_EQ(phys_page0_new, phys_page0_old);
ASSERT_EQ(ftl->mpns[1], kInvalidPage);
uint32_t phys_page1_new = kInvalidPage;
ASSERT_EQ(0, FtlnMapGetPpn(ftl, kMappingsPerMpn, &phys_page1_new));
ASSERT_EQ(phys_page1_new, kInvalidPage);
}

// Purposely generates a high garbage level by interleaving which vpages get written, and stops
// with partially filled volume and map blocks in order to waste that space as well.
void FillWithGarbage(FtlShell* ftl, uint32_t num_blocks) {
uint8_t buf[kPageSize];
memcpy(buf, "abc123", 6);
ftl::Volume* volume = ftl->volume();
// First write every page in order.
for (uint32_t i = 0; i < ftl->num_pages(); ++i) {
ASSERT_EQ(ZX_OK, volume->Write(i, 1, buf));
}
// Now exhaust any breathing room by replacing 1 page from each volume block.
uint32_t page = 0;
for (uint32_t num_writes = ftl->num_pages(); num_writes < kPagesPerBlock * num_blocks;
++num_writes) {
ASSERT_EQ(ZX_OK, volume->Write(page, 1, buf));
page += kPagesPerBlock;
if (page >= ftl->num_pages()) {
// If we go off the end, we'll now replace the second page of each physical block.
page = (page % kPagesPerBlock) + 1;
}
}
// If we happen to end on a complete volume block, add another write.
if (std::max(kPagesPerBlock * num_blocks, ftl->num_pages()) % kPagesPerBlock == 0) {
ASSERT_EQ(ZX_OK, volume->Write(page, 1, buf));
}
}

// Ensure we can remount after filling with garbage.
TEST(FtlTest, HighGarbageLevelRemount) {
FtlShell ftl_shell;
auto driver = std::make_unique<NdmRamDriver>(kDefaultOptions, kBoringTestOptions);
ASSERT_EQ(nullptr, driver->Init());
ASSERT_TRUE(ftl_shell.InitWithDriver(std::move(driver)));
ASSERT_NO_FATAL_FAILURE(FillWithGarbage(&ftl_shell, kDefaultOptions.num_blocks));
// Flush and remount. Not enough map pages to fill the map block.
ftl::Volume* volume = ftl_shell.volume();
ASSERT_EQ(ZX_OK, volume->Flush());
ASSERT_EQ(nullptr, volume->ReAttach());
FTLN ftl = reinterpret_cast<FTLN>(
reinterpret_cast<ftl::VolumeImpl*>(volume)->GetInternalVolumeForTest());
// The FTL maintains this minimum number of free blocks. During mount it will need to grab 2 of
// them, one for new map pages and one for new volume pages, which means it will need to recycle
// and reclaim blocks; the first ones it will try to recover are the half-finished map block and
// volume block. This should hold as long as we've generated enough "garbage" in the volume.
EXPECT_LE(ftl->num_free_blks, static_cast<uint32_t>(FTLN_MIN_FREE_BLKS));
// Ensure that we can perform a read.
uint8_t buf[kPageSize];
ASSERT_EQ(ZX_OK, volume->Read(1, 1, buf));
}

// It is a critical invariant that the erase list is the last thing written at shutdown and that it
// is erased before any other mutating operation at mount. We fill the volume with garbage first to
// trigger block recycles on mount, which should never happen before the erase list is removed.
TEST(FtlTest, EraseListLastAndFirst) {
std::mutex lock;
uint32_t last_write_page = 0;
uint32_t first_mutation_page = 0;
NdmRamDriver::Op first_mutation_type = NdmRamDriver::Op::Read;
bool first_mutation_done = false;
FtlShell ftl_shell;
auto driver = std::make_unique<NdmRamDriver>(kDefaultOptions, kBoringTestOptions);
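// Track the last page written and the first mutating operation (write or erase) seen by the
// driver, ignoring the NDM metadata blocks.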
driver->set_operation_callback([&](NdmRamDriver::Op op, uint32_t page) {
std::lock_guard l(lock);
// Exclude the 2 blocks used for ndm metadata.
if (page / kPagesPerBlock > kDefaultOptions.num_blocks - 3) {
return 0;
}
if ((op == NdmRamDriver::Op::Write || op == NdmRamDriver::Op::Erase) && !first_mutation_done) {
first_mutation_done = true;
first_mutation_type = op;
first_mutation_page = page;
}
if (op == NdmRamDriver::Op::Write) {
last_write_page = page;
}
return 0;
});
ASSERT_EQ(nullptr, driver->Init());
NdmRamDriver* unowned_driver = driver.get();
ASSERT_TRUE(ftl_shell.InitWithDriver(std::move(driver)));
ASSERT_NO_FATAL_FAILURE(FillWithGarbage(&ftl_shell, kDefaultOptions.num_blocks));
ftl::Volume* volume = ftl_shell.volume();
ASSERT_EQ(ZX_OK, volume->Flush());
// Recycle the map block, which eagerly erases it, so that there is something for the erase list to
// contain. Do it twice since we need at least 2 erased blocks to write out an erase list.
FTLN ftl = reinterpret_cast<FTLN>(
reinterpret_cast<ftl::VolumeImpl*>(volume)->GetInternalVolumeForTest());
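// Meta pages, such as the erase list, are written using this reserved map page number.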
uint32_t meta_page = ftl->num_map_pgs - 1;
uint32_t phys_map_page = ftl->mpns[0];
ASSERT_NE(0xFFFFFFFFu, phys_map_page);
ASSERT_EQ(0, FtlnRecycleMapBlk(ftl, phys_map_page / kPagesPerBlock));
ASSERT_NE(phys_map_page, ftl->mpns[0]);
phys_map_page = ftl->mpns[0];
ASSERT_NE(0xFFFFFFFFu, phys_map_page);
ASSERT_EQ(0, FtlnRecycleMapBlk(ftl, phys_map_page / kPagesPerBlock));
ASSERT_NE(phys_map_page, ftl->mpns[0]);
// Erase list gets written during unmount.
volume->Unmount();
uint32_t last_write_unmount = 0;
{
// Save the last mutation from unmount, which should be a write.
std::lock_guard l(lock);
last_write_unmount = last_write_page;
// Reset the first_mutation bit so that it will recapture on mount.
first_mutation_done = false;
}
// Get the spare for it.
uint8_t spare_buf[kSpareSize];
ASSERT_EQ(ZX_OK, unowned_driver->NandRead(last_write_unmount, 1, nullptr, spare_buf));
// It is a meta-page, which can only be an erase page or a continuation of an erase page.
ASSERT_NE(static_cast<uint32_t>(-1), GET_SA_BC(spare_buf));
ASSERT_EQ(meta_page, GET_SA_VPN(spare_buf));
// Remount, then verify that the first mutation was erasing the block that contained the erase list.
ASSERT_EQ(nullptr, volume->ReAttach());
uint8_t page_buf[kPageSize];
ASSERT_EQ(ZX_OK, volume->Write(0, 1, page_buf));
{
std::lock_guard l(lock);
// First mutation should be to the block containing the erase list.
ASSERT_TRUE(first_mutation_done);
ASSERT_EQ(NdmRamDriver::Op::Erase, first_mutation_type);
ASSERT_EQ(last_write_unmount / kPagesPerBlock, first_mutation_page / kPagesPerBlock);
}
}
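
// Exercise the block bitmap helpers (set, unset, check, count) used for the maybe-bad block list.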
TEST(FtlTest, BitMapTest) {
FtlShell ftl_shell;
auto driver = std::make_unique<NdmRamDriver>(kDefaultOptions, kBoringTestOptions);
ASSERT_EQ(nullptr, driver->Init());
ASSERT_TRUE(ftl_shell.InitWithDriver(std::move(driver)));
FTLN ftl = reinterpret_cast<FTLN>(
reinterpret_cast<ftl::VolumeImpl*>(ftl_shell.volume())->GetInternalVolumeForTest());
// Nothing should get marked bad with kBoringTestOptions. All reads are always successful.
ASSERT_EQ(FtlnCountBlockBitmap(ftl, ftl->maybe_bad), 0u);
ASSERT_EQ(FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, 0), 0u);
ASSERT_EQ(FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, 1), 0u);
FtlnSetBlockBitmap(ftl, ftl->maybe_bad, 0);
ASSERT_EQ(FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, 0), 1u);
ASSERT_EQ(FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, 1), 0u);
ASSERT_EQ(FtlnCountBlockBitmap(ftl, ftl->maybe_bad), 1u);
// Setting it again should do nothing.
FtlnSetBlockBitmap(ftl, ftl->maybe_bad, 0);
ASSERT_EQ(FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, 0), 1u);
ASSERT_EQ(FtlnCountBlockBitmap(ftl, ftl->maybe_bad), 1u);
FtlnSetBlockBitmap(ftl, ftl->maybe_bad, ftl->num_blks - 1);
ASSERT_EQ(FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, ftl->num_blks - 1), 1u);
ASSERT_EQ(FtlnCountBlockBitmap(ftl, ftl->maybe_bad), 2u);
// Now unset block zero.
FtlnUnsetBlockBitmap(ftl, ftl->maybe_bad, 0);
ASSERT_EQ(FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, 0), 0u);
ASSERT_EQ(FtlnCountBlockBitmap(ftl, ftl->maybe_bad), 1u);
// Repeating it should do nothing.
FtlnUnsetBlockBitmap(ftl, ftl->maybe_bad, 0);
ASSERT_EQ(FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, 0), 0u);
ASSERT_EQ(FtlnCountBlockBitmap(ftl, ftl->maybe_bad), 1u);
// Do a bunch in a row to see that things are being set properly at the u32 member boundary.
uint32_t previous = FtlnCountBlockBitmap(ftl, ftl->maybe_bad);
for (int i = 25; i < 35; ++i) {
FtlnSetBlockBitmap(ftl, ftl->maybe_bad, i);
ASSERT_EQ(FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, i), 1u);
uint32_t next = FtlnCountBlockBitmap(ftl, ftl->maybe_bad);
ASSERT_EQ(previous + 1, next);
previous = next;
}
// Same range, but unsetting.
for (int i = 25; i < 35; ++i) {
FtlnUnsetBlockBitmap(ftl, ftl->maybe_bad, i);
ASSERT_EQ(FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, i), 0u);
uint32_t next = FtlnCountBlockBitmap(ftl, ftl->maybe_bad);
ASSERT_EQ(previous - 1, next);
previous = next;
}
}
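
// A block that needs heavy ECC correction soon after being written is flagged as worn; blocks with
// old data or whose errors are attributable to read disturb are not.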
TEST(FtlTest, FindWornBlockTest) {
std::atomic<bool> fail_next_erase = false;
FtlShell ftl_shell;
auto driver = std::make_unique<NdmRamDriver>(kDefaultOptions, kBoringTestOptions);
driver->set_operation_callback([&fail_next_erase](NdmRamDriver::Op op, uint32_t page) {
if (op == NdmRamDriver::Op::Erase &&
fail_next_erase.exchange(false, std::memory_order_relaxed)) {
return ftl::kNdmError;
}
return ftl::kNdmOk;
});
NdmRamDriver* driver_unowned = driver.get();
ASSERT_EQ(nullptr, driver->Init());
ASSERT_TRUE(ftl_shell.InitWithDriver(std::move(driver)));
ftl::Volume* volume = ftl_shell.volume();
auto buffer = std::make_unique<char[]>(kPageSize);
ASSERT_EQ(ZX_OK, volume->Write(0, 1, buffer.get()));
ASSERT_EQ(ZX_OK, volume->Flush());
ASSERT_EQ(nullptr, volume->ReAttach());
ASSERT_EQ(ZX_OK, volume->Write(1, 1, buffer.get()));
// Write page 2 many times to force it onto another block.
for (uint32_t i = 0; i < kPagesPerBlock; ++i) {
ASSERT_EQ(ZX_OK, volume->Write(2, 1, buffer.get()));
}
// The writes should end up in different blocks, since you can't append to a used block after
// remount. It might have been recycled during mount if the volume was super full, but that isn't
// the case.
FTLN ftl = reinterpret_cast<FTLN>(
reinterpret_cast<ftl::VolumeImpl*>(volume)->GetInternalVolumeForTest());
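// Look up where each logical page currently lives on the physical media.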
uint32_t page_zero_physical;
ASSERT_EQ(0, FtlnMapGetPpn(ftl, 0, &page_zero_physical));
uint32_t page_one_physical;
ASSERT_EQ(0, FtlnMapGetPpn(ftl, 1, &page_one_physical));
uint32_t page_two_physical;
ASSERT_EQ(0, FtlnMapGetPpn(ftl, 2, &page_two_physical));
ASSERT_NE(page_zero_physical / kPagesPerBlock, page_one_physical / kPagesPerBlock);
ASSERT_NE(page_zero_physical / kPagesPerBlock, page_two_physical / kPagesPerBlock);
ASSERT_NE(page_one_physical / kPagesPerBlock, page_two_physical / kPagesPerBlock);
// All are now going to return reads that required many ECC corrections.
driver_unowned->SetUnsafeEcc(page_zero_physical, true);
driver_unowned->SetUnsafeEcc(page_one_physical, true);
driver_unowned->SetUnsafeEcc(page_two_physical, true);
// Pretend page one has been read a lot, so its ECC issues are attributable to read disturb. Set the
// read count a bit below the maximum so that it doesn't trigger a recycle.
SET_RC(ftl->bdata[page_one_physical / kPagesPerBlock], ftl->max_rc - 5);
ftl::Volume::Stats stats;
// Reading page zero shouldn't change anything as it was not recently written.
ASSERT_EQ(ZX_OK, volume->Read(0, 1, buffer.get()));
ASSERT_EQ(0u, FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, page_zero_physical / kPagesPerBlock));
ASSERT_EQ(0u, FtlnCountBlockBitmap(ftl, ftl->maybe_bad));
volume->GetStats(&stats);
ASSERT_EQ(0u, stats.worn_blocks_detected);
// Reading page one shouldn't change anything as the failure is just read-disturb.
ASSERT_EQ(ZX_OK, volume->Read(1, 1, buffer.get()));
ASSERT_EQ(0u, FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, page_one_physical / kPagesPerBlock));
ASSERT_EQ(0u, FtlnCountBlockBitmap(ftl, ftl->maybe_bad));
volume->GetStats(&stats);
ASSERT_EQ(0u, stats.worn_blocks_detected);
// Reading page two should mark it worn.
ASSERT_EQ(ZX_OK, volume->Read(2, 1, buffer.get()));
ASSERT_EQ(1u, FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, page_two_physical / kPagesPerBlock));
ASSERT_EQ(1u, FtlnCountBlockBitmap(ftl, ftl->maybe_bad));
volume->GetStats(&stats);
ASSERT_EQ(1u, stats.worn_blocks_detected);
// Performing an operation will recycle the data to a new block, but nothing will be marked bad.
ASSERT_EQ(ZX_OK, volume->Write(3, 1, buffer.get()));
uint32_t page_three_physical;
uint32_t new_page_two_physical;
ASSERT_EQ(0, FtlnMapGetPpn(ftl, 2, &new_page_two_physical));
ASSERT_EQ(0, FtlnMapGetPpn(ftl, 3, &page_three_physical));
ASSERT_NE(page_two_physical, new_page_two_physical);
ASSERT_EQ(new_page_two_physical / kPagesPerBlock, page_three_physical / kPagesPerBlock);
ASSERT_EQ(1u, FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, page_two_physical / kPagesPerBlock));
ASSERT_EQ(1u, FtlnCountBlockBitmap(ftl, ftl->maybe_bad));
// Get the block marked bad so it is removed from the worn list, but not from the written list.
ftl::VolumeImpl::Counters counters;
ASSERT_EQ(ZX_OK, volume->GetCounters(&counters));
ASSERT_EQ(0u, counters.running_bad_blocks);
fail_next_erase.store(true, std::memory_order_relaxed);
ASSERT_EQ(0, FtlnEraseBlk(ftl, page_two_physical / kPagesPerBlock));
ASSERT_EQ(ZX_OK, volume->GetCounters(&counters));
ASSERT_EQ(1u, counters.running_bad_blocks);
ASSERT_EQ(0u, FtlnCountBlockBitmap(ftl, ftl->maybe_bad));
ASSERT_EQ(0u, FtlnCheckBlockBitmap(ftl, ftl->maybe_bad, page_two_physical / kPagesPerBlock));
ASSERT_EQ(1u, FtlnCheckBlockBitmap(ftl, ftl->written, page_two_physical / kPagesPerBlock));
}
} // namespace