// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <string.h>
#include <sys/stat.h>
#include "f2fs.h"
#include "zircon/errors.h"
#include "zircon/types.h"
namespace f2fs {
/*
* inline functions
*/
inline seg_entry *SegMgr::GetSegEntry(uint32_t segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
return &sit_i->sentries[segno];
}
inline sec_entry *SegMgr::GetSecEntry(uint32_t segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
return &sit_i->sec_entries[GetSecNo(&sbi, segno)];
}
inline uint32_t SegMgr::GetValidBlocks(uint32_t segno, int section) {
/*
* In order to get # of valid blocks in a section instantly from many
* segments, f2fs manages two counting structures separately.
*/
if (section > 1) {
return GetSecEntry(segno)->valid_blocks;
}
return GetSegEntry(segno)->valid_blocks;
}
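// A raw SIT entry packs the segment type and the valid block count into a
// single 16-bit field: vblocks = (type << SIT_VBLOCKS_SHIFT) | valid_blocks,
// which GET_SIT_VBLOCKS() and GET_SIT_TYPE() unpack below.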
inline void SegMgr::SegInfoFromRawSit(seg_entry *se, f2fs_sit_entry *rs) {
se->valid_blocks = GET_SIT_VBLOCKS(rs);
se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
se->type = GET_SIT_TYPE(rs);
se->mtime = LeToCpu(uint64_t{rs->mtime});
}
inline void SegMgr::SegInfoToRawSit(seg_entry *se, f2fs_sit_entry *rs) {
uint16_t raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) | se->valid_blocks;
rs->vblocks = CpuToLe(raw_vblocks);
memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
se->ckpt_valid_blocks = se->valid_blocks;
rs->mtime = CpuToLe(static_cast<uint64_t>(se->mtime));
}
inline uint32_t SegMgr::FindNextInuse(free_segmap_info *free_i, uint32_t max,
uint32_t segno) {
uint32_t ret;
ReadLock(&free_i->segmap_lock);
ret = find_next_bit_le(free_i->free_segmap, max, segno);
ReadUnlock(&free_i->segmap_lock);
return ret;
}
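// Marks |segno| free in the segment bitmap. If every segment in the enclosing
// section becomes free, the section is marked free as well and the counters
// are updated accordingly.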
inline void SegMgr::__SetFree(uint32_t segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
free_segmap_info *free_i = FREE_I(&sbi);
uint32_t secno = segno / sbi.segs_per_sec;
uint32_t start_segno = secno * sbi.segs_per_sec;
uint32_t next;
WriteLock(&free_i->segmap_lock);
clear_bit(segno, free_i->free_segmap);
free_i->free_segments++;
next = find_next_bit_le(free_i->free_segmap, TotalSegs(&sbi), start_segno);
if (next >= start_segno + sbi.segs_per_sec) {
clear_bit(secno, free_i->free_secmap);
free_i->free_sections++;
}
WriteUnlock(&free_i->segmap_lock);
}
inline void SegMgr::__SetInuse(uint32_t segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
free_segmap_info *free_i = FREE_I(&sbi);
uint32_t secno = segno / sbi.segs_per_sec;
set_bit(segno, free_i->free_segmap);
free_i->free_segments--;
if (!test_and_set_bit(secno, free_i->free_secmap))
free_i->free_sections--;
}
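// Like __SetFree(), but only updates the counters when the free bit actually
// changes, so it is safe to call for a segment that is already free.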
inline void SegMgr::__SetTestAndFree(uint32_t segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
free_segmap_info *free_i = FREE_I(&sbi);
uint32_t secno = segno / sbi.segs_per_sec;
uint32_t start_segno = secno * sbi.segs_per_sec;
uint32_t next;
WriteLock(&free_i->segmap_lock);
if (test_and_clear_bit(segno, free_i->free_segmap)) {
free_i->free_segments++;
next = find_next_bit_le(free_i->free_segmap, TotalSegs(&sbi), start_segno);
if (next >= start_segno + sbi.segs_per_sec) {
if (test_and_clear_bit(secno, free_i->free_secmap))
free_i->free_sections++;
}
}
WriteUnlock(&free_i->segmap_lock);
}
inline void SegMgr::__SetTestAndInuse(uint32_t segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
free_segmap_info *free_i = FREE_I(&sbi);
uint32_t secno = segno / sbi.segs_per_sec;
WriteLock(&free_i->segmap_lock);
if (!test_and_set_bit(segno, free_i->free_segmap)) {
free_i->free_segments--;
if (!test_and_set_bit(secno, free_i->free_secmap))
free_i->free_sections--;
}
WriteUnlock(&free_i->segmap_lock);
}
void SegMgr::GetSitBitmap(void *dst_addr) {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}
#if 0 // porting needed
inline block_t SegMgr::WrittenBlockCount() {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
block_t vblocks;
mtx_lock(&sit_i->sentry_lock);
vblocks = sit_i->written_valid_blocks;
mtx_unlock(&sit_i->sentry_lock);
return vblocks;
}
#endif
uint32_t SegMgr::FreeSegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
free_segmap_info *free_i = FREE_I(&sbi);
uint32_t free_segs;
ReadLock(&free_i->segmap_lock);
free_segs = free_i->free_segments;
ReadUnlock(&free_i->segmap_lock);
return free_segs;
}
inline int SegMgr::ReservedSegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
return SM_I(&sbi)->reserved_segments;
}
inline uint32_t SegMgr::FreeSections() {
f2fs_sb_info &sbi = fs_->SbInfo();
free_segmap_info *free_i = FREE_I(&sbi);
uint32_t free_secs;
ReadLock(&free_i->segmap_lock);
free_secs = free_i->free_sections;
ReadUnlock(&free_i->segmap_lock);
return free_secs;
}
inline uint32_t SegMgr::PrefreeSegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
return DIRTY_I(&sbi)->nr_dirty[static_cast<int>(DirtyType::kPre)];
}
inline uint32_t SegMgr::DirtySegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
return DIRTY_I(&sbi)->nr_dirty[static_cast<int>(DirtyType::kDirtyHotData)] +
DIRTY_I(&sbi)->nr_dirty[static_cast<int>(DirtyType::kDirtyWarmData)] +
DIRTY_I(&sbi)->nr_dirty[static_cast<int>(DirtyType::kDirtyColdData)] +
DIRTY_I(&sbi)->nr_dirty[static_cast<int>(DirtyType::kDirtyHotNode)] +
DIRTY_I(&sbi)->nr_dirty[static_cast<int>(DirtyType::kDirtyWarmNode)] +
DIRTY_I(&sbi)->nr_dirty[static_cast<int>(DirtyType::kDirtyColdNode)];
}
inline int SegMgr::OverprovisionSegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
return SM_I(&sbi)->ovp_segments;
}
inline int SegMgr::OverprovisionSections() {
f2fs_sb_info &sbi = fs_->SbInfo();
return (static_cast<uint32_t>(OverprovisionSegments())) / sbi.segs_per_sec;
}
inline int SegMgr::ReservedSections() {
f2fs_sb_info &sbi = fs_->SbInfo();
return (static_cast<uint32_t>(ReservedSegments())) / sbi.segs_per_sec;
}
inline bool SegMgr::NeedSSR() { return (FreeSections() < static_cast<uint32_t>(OverprovisionSections())); }
inline int SegMgr::GetSsrSegment(int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
curseg_info *curseg = CURSEG_I(&sbi, type);
return DIRTY_I(&sbi)->v_ops->get_victim(&sbi, &(curseg)->next_segno, static_cast<int>(GcType::kBgGc), type, static_cast<uint8_t>(AllocMode::kLFS));
}
inline bool SegMgr::HasNotEnoughFreeSecs() {
return FreeSections() <= static_cast<uint32_t>(ReservedSections());
}
inline uint32_t SegMgr::Utilization() {
f2fs_sb_info &sbi = fs_->SbInfo();
return static_cast<uint32_t>(static_cast<int64_t>(valid_user_blocks(&sbi)) * 100 /
static_cast<int64_t>(sbi.user_block_count));
}
/*
* Sometimes it is better for f2fs to drop its out-of-place update policy.
* So, when fs utilization exceeds kMinIpuUtil, f2fs tries to write data
* in the original place, as traditional file systems do.
* (Setting kMinIpuUtil to 100 percent would disable in-place updates entirely.)
* See NeedInplaceUpdate() below.
*/
constexpr uint32_t kMinIpuUtil = 50;
bool SegMgr::NeedInplaceUpdate(VnodeF2fs *vnode) {
if (S_ISDIR(vnode->i_mode_))
return false;
if (/*NeedSSR() &&*/ Utilization() > kMinIpuUtil)
return true;
return false;
}
uint32_t SegMgr::CursegSegno(int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
curseg_info *curseg = CURSEG_I(&sbi, type);
return curseg->segno;
}
uint8_t SegMgr::CursegAllocType(int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
curseg_info *curseg = CURSEG_I(&sbi, type);
return curseg->alloc_type;
}
inline uint16_t SegMgr::CursegBlkoff(int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
curseg_info *curseg = CURSEG_I(&sbi, type);
return curseg->next_blkoff;
}
inline void SegMgr::CheckSegRange(uint32_t segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
[[maybe_unused]] uint32_t end_segno = SM_I(&sbi)->segment_count - 1;
ZX_ASSERT(!(segno > end_segno));
}
#if 0 // porting needed
/*
* This function is used for only debugging.
* NOTE: In future, we have to remove this function.
*/
inline void SegMgr::VerifyBlockAddr(block_t blk_addr) {
f2fs_sb_info &sbi = fs_->SbInfo();
f2fs_sm_info *sm_info = SM_I(&sbi);
block_t total_blks = sm_info->segment_count << sbi.log_blocks_per_seg;
[[maybe_unused]] block_t start_addr = sm_info->seg0_blkaddr;
[[maybe_unused]] block_t end_addr = start_addr + total_blks - 1;
ZX_ASSERT(!(blk_addr < start_addr));
ZX_ASSERT(!(blk_addr > end_addr));
}
#endif
/*
* A summary block is always treated as an invalid block.
*/
inline void SegMgr::CheckBlockCount(int segno, f2fs_sit_entry *raw_sit) {
f2fs_sb_info &sbi = fs_->SbInfo();
f2fs_sm_info *sm_info = SM_I(&sbi);
uint32_t end_segno = sm_info->segment_count - 1;
int valid_blocks = 0;
uint32_t i;
/* check segment usage */
ZX_ASSERT(!(GET_SIT_VBLOCKS(raw_sit) > sbi.blocks_per_seg));
/* check boundary of a given segment number */
ZX_ASSERT(!(segno > static_cast<int>(end_segno)));
/* check bitmap with valid block count */
for (i = 0; i < sbi.blocks_per_seg; i++) {
if (f2fs_test_bit(i, reinterpret_cast<char *>(raw_sit->valid_map)))
valid_blocks++;
}
ZX_ASSERT(GET_SIT_VBLOCKS(raw_sit) == valid_blocks);
}
inline pgoff_t SegMgr::CurrentSitAddr(uint32_t start) {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
uint32_t offset = SitBlockOffset(sit_i, start);
block_t blk_addr = sit_i->sit_base_addr + offset;
CheckSegRange(start);
/* calculate sit block address */
if (f2fs_test_bit(offset, sit_i->sit_bitmap))
blk_addr += sit_i->sit_blocks;
return blk_addr;
}
inline pgoff_t SegMgr::NextSitAddr(pgoff_t block_addr) {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
block_addr -= sit_i->sit_base_addr;
if (block_addr < sit_i->sit_blocks)
block_addr += sit_i->sit_blocks;
else
block_addr -= sit_i->sit_blocks;
return block_addr + sit_i->sit_base_addr;
}
inline void SegMgr::SetToNextSit(sit_info *sit_i, uint32_t start) {
uint32_t block_off = SitBlockOffset(sit_i, start);
if (f2fs_test_bit(block_off, sit_i->sit_bitmap)) {
f2fs_clear_bit(block_off, sit_i->sit_bitmap);
} else {
f2fs_set_bit(block_off, sit_i->sit_bitmap);
}
}
inline uint64_t SegMgr::GetMtime() {
auto cur_time = time(nullptr);
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
return sit_i->elapsed_time + cur_time - sit_i->mounted_time;
}
inline void SegMgr::SetSummary(f2fs_summary *sum, nid_t nid, uint32_t ofs_in_node,
uint8_t version) {
sum->nid = CpuToLe(nid);
sum->ofs_in_node = CpuToLe(ofs_in_node);
sum->version = version;
}
inline block_t SegMgr::StartSumBlock() {
f2fs_sb_info &sbi = fs_->SbInfo();
return __start_cp_addr(&sbi) + LeToCpu(F2FS_CKPT(&sbi)->cp_pack_start_sum);
}
inline block_t SegMgr::SumBlkAddr(int base, int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
return __start_cp_addr(&sbi) + LeToCpu(F2FS_CKPT(&sbi)->cp_pack_total_block_count) -
(base + 1) + type;
}
/*
* functions
*/
SegMgr::SegMgr(F2fs *fs) : fs_(fs) {}
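// Estimates how many sections the dirty node and dentry pages would consume
// once written back, and requests a flush when the free sections cannot cover
// that demand (node sections plus twice the dentry sections) on top of the
// reserved sections.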
int SegMgr::NeedToFlush() {
f2fs_sb_info &sbi = fs_->SbInfo();
uint32_t pages_per_sec = (1 << sbi.log_blocks_per_seg) * sbi.segs_per_sec;
int node_secs =
((get_pages(&sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1) >> sbi.log_blocks_per_seg) /
sbi.segs_per_sec;
int dent_secs =
((get_pages(&sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1) >> sbi.log_blocks_per_seg) /
sbi.segs_per_sec;
if (sbi.por_doing)
return 0;
if (FreeSections() <= static_cast<uint32_t>(node_secs + 2 * dent_secs + ReservedSections()))
return 1;
return 0;
}
/**
* This function balances dirty node and dentry pages.
* In addition, it controls garbage collection.
*/
void SegMgr::F2fsBalanceFs() {
f2fs_sb_info &sbi = fs_->SbInfo();
WritebackControl wbc = {
#if 0 // porting needed
// .nr_to_write = LONG_MAX,
// .sync_mode = WB_SYNC_ALL,
// .for_reclaim = 0,
#endif
};
if (sbi.por_doing)
return;
/*
* We should do a checkpoint when there are many dirty node pages
* and still enough free segments. After that, we should do GC.
*/
if (NeedToFlush()) {
fs_->SyncDirtyDirInodes();
fs_->Nodemgr().SyncNodePages(0, &wbc);
}
// TODO: IMPL GC
if (HasNotEnoughFreeSecs() && PrefreeSegments()) {
#if 0 // porting needed
// mtx_lock(&sbi.gc_mutex);
// f2fs_gc(&sbi, 1);
#endif
fs_->WriteCheckpoint(false, false);
}
}
void SegMgr::__LocateDirtySegment(uint32_t segno, DirtyType dirty_type) {
f2fs_sb_info &sbi = fs_->SbInfo();
dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
/* need not be added */
if (IsCurSeg(&sbi, segno))
return;
if (!test_and_set_bit(segno, dirty_i->dirty_segmap[static_cast<int>(dirty_type)]))
dirty_i->nr_dirty[static_cast<int>(dirty_type)]++;
if (dirty_type == DirtyType::kDirty) {
seg_entry *sentry = GetSegEntry(segno);
dirty_type = static_cast<DirtyType>(sentry->type);
if (!test_and_set_bit(segno, dirty_i->dirty_segmap[static_cast<int>(dirty_type)]))
dirty_i->nr_dirty[static_cast<int>(dirty_type)]++;
}
}
void SegMgr::__RemoveDirtySegment(uint32_t segno, DirtyType dirty_type) {
f2fs_sb_info &sbi = fs_->SbInfo();
dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
if (test_and_clear_bit(segno, dirty_i->dirty_segmap[static_cast<int>(dirty_type)]))
dirty_i->nr_dirty[static_cast<int>(dirty_type)]--;
if (dirty_type == DirtyType::kDirty) {
seg_entry *sentry = GetSegEntry(segno);
dirty_type = static_cast<DirtyType>(sentry->type);
if (test_and_clear_bit(segno, dirty_i->dirty_segmap[static_cast<int>(dirty_type)]))
dirty_i->nr_dirty[static_cast<int>(dirty_type)]--;
clear_bit(segno, dirty_i->victim_segmap[static_cast<int>(GcType::kFgGc)]);
clear_bit(segno, dirty_i->victim_segmap[static_cast<int>(GcType::kBgGc)]);
}
}
/**
* This must not fail with an error such as -ENOMEM, since adding a dirty
* entry to the seglist is not a critical operation.
* If the given segment is one of the current working segments, it is not added.
*/
void SegMgr::LocateDirtySegment(uint32_t segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
uint16_t valid_blocks;
if (segno == kNullSegNo || IsCurSeg(&sbi, segno))
return;
mtx_lock(&dirty_i->seglist_lock);
valid_blocks = GetValidBlocks(segno, 0);
if (valid_blocks == 0) {
__LocateDirtySegment(segno, DirtyType::kPre);
__RemoveDirtySegment(segno, DirtyType::kDirty);
} else if (valid_blocks < sbi.blocks_per_seg) {
__LocateDirtySegment(segno, DirtyType::kDirty);
} else {
/* Recovery routine with SSR needs this */
__RemoveDirtySegment(segno, DirtyType::kDirty);
}
mtx_unlock(&dirty_i->seglist_lock);
}
/**
* ClearPrefreeSegments() should be called after the checkpoint is done.
*/
void SegMgr::SetPrefreeAsFreeSegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
uint32_t segno, offset = 0;
uint32_t total_segs = TotalSegs(&sbi);
mtx_lock(&dirty_i->seglist_lock);
while (true) {
segno = find_next_bit_le(dirty_i->dirty_segmap[static_cast<int>(DirtyType::kPre)], total_segs, offset);
if (segno >= total_segs)
break;
__SetTestAndFree(segno);
offset = segno + 1;
}
mtx_unlock(&dirty_i->seglist_lock);
}
void SegMgr::ClearPrefreeSegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
uint32_t segno, offset = 0;
uint32_t total_segs = TotalSegs(&sbi);
mtx_lock(&dirty_i->seglist_lock);
while (true) {
segno = find_next_bit_le(dirty_i->dirty_segmap[static_cast<int>(DirtyType::kPre)], total_segs, offset);
if (segno >= total_segs)
break;
offset = segno + 1;
if (test_and_clear_bit(segno, dirty_i->dirty_segmap[static_cast<int>(DirtyType::kPre)]))
dirty_i->nr_dirty[static_cast<int>(DirtyType::kPre)]--;
#if 0 // porting needed (Trim)
/* Let's use trim */
// if (test_opt(sbi, DISCARD))
// blkdev_issue_discard(sbi->sb->s_bdev,
// StartBlock(sbi, segno) <<
// sbi->log_sectors_per_block,
// 1 << (sbi->log_sectors_per_block +
// sbi->log_blocks_per_seg),
// GFP_NOFS, 0);
#endif
}
mtx_unlock(&dirty_i->seglist_lock);
}
void SegMgr::__MarkSitEntryDirty(uint32_t segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
if (!test_and_set_bit_le(segno, sit_i->dirty_sentries_bitmap))
sit_i->dirty_sentries++;
}
void SegMgr::__SetSitEntryType(int type, uint32_t segno, int modified) {
seg_entry *se = GetSegEntry(segno);
se->type = type;
if (modified)
__MarkSitEntryDirty(segno);
}
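// Applies |del| (+1 or -1) to the valid block accounting of the segment that
// owns |blkaddr|: the in-memory SIT entry, its valid bitmap, the mtime used
// by GC, the written block count for the checkpoint area, and, for
// multi-segment sections, the per-section counter.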
void SegMgr::UpdateSitEntry(block_t blkaddr, int del) {
f2fs_sb_info &sbi = fs_->SbInfo();
seg_entry *se;
uint32_t segno, offset;
int64_t new_vblocks;
segno = GetSegNo(&sbi, blkaddr);
se = GetSegEntry(segno);
new_vblocks = se->valid_blocks + del;
offset = GetSegOffFromSeg0(&sbi, blkaddr) & (sbi.blocks_per_seg - 1);
ZX_ASSERT(!((new_vblocks >> (sizeof(uint16_t) << 3)) || (new_vblocks > sbi.blocks_per_seg)));
se->valid_blocks = new_vblocks;
se->mtime = GetMtime();
SIT_I(&sbi)->max_mtime = se->mtime;
/* Update valid block bitmap */
if (del > 0) {
if (f2fs_set_bit(offset, reinterpret_cast<char *>(se->cur_valid_map)))
ZX_ASSERT(0);
} else {
if (!f2fs_clear_bit(offset, reinterpret_cast<char *>(se->cur_valid_map)))
ZX_ASSERT(0);
}
if (!f2fs_test_bit(offset, reinterpret_cast<char *>(se->ckpt_valid_map)))
se->ckpt_valid_blocks += del;
__MarkSitEntryDirty(segno);
/* update total number of valid blocks to be written in ckpt area */
SIT_I(&sbi)->written_valid_blocks += del;
if (sbi.segs_per_sec > 1)
GetSecEntry(segno)->valid_blocks += del;
}
void SegMgr::RefreshSitEntry(block_t old_blkaddr, block_t new_blkaddr) {
f2fs_sb_info &sbi = fs_->SbInfo();
UpdateSitEntry(new_blkaddr, 1);
if (GetSegNo(&sbi, old_blkaddr) != kNullSegNo)
UpdateSitEntry(old_blkaddr, -1);
}
void SegMgr::InvalidateBlocks(block_t addr) {
f2fs_sb_info &sbi = fs_->SbInfo();
uint32_t segno = GetSegNo(&sbi, addr);
sit_info *sit_i = SIT_I(&sbi);
ZX_ASSERT(addr != NULL_ADDR);
if (addr == NEW_ADDR)
return;
/* add it into sit main buffer */
mtx_lock(&sit_i->sentry_lock);
UpdateSitEntry(addr, -1);
/* add it into dirty seglist */
LocateDirtySegment(segno);
mtx_unlock(&sit_i->sentry_lock);
}
/**
* This function must be called with curseg_mutex held.
*/
void SegMgr::__AddSumEntry(int type, f2fs_summary *sum, uint16_t offset) {
f2fs_sb_info &sbi = fs_->SbInfo();
curseg_info *curseg = CURSEG_I(&sbi, type);
char *addr = reinterpret_cast<char *>(curseg->sum_blk);
(addr) += offset * sizeof(f2fs_summary);
memcpy(addr, sum, sizeof(f2fs_summary));
}
/**
* Calculates how many meta pages are needed to write the current summaries.
*/
int SegMgr::NpagesForSummaryFlush() {
f2fs_sb_info &sbi = fs_->SbInfo();
int total_size_bytes = 0;
int valid_sum_count = 0;
int i, sum_space;
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
if (sbi.ckpt->alloc_type[i] == static_cast<uint8_t>(AllocMode::kSSR)) {
valid_sum_count += sbi.blocks_per_seg;
} else {
valid_sum_count += CursegBlkoff(i);
}
}
total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1) + sizeof(nat_journal) + 2 +
sizeof(sit_journal) + 2;
sum_space = kPageCacheSize - SUM_FOOTER_SIZE;
if (total_size_bytes < sum_space) {
return 1;
} else if (total_size_bytes < 2 * sum_space) {
return 2;
}
return 3;
}
/**
* The caller must put the returned summary page when done with it.
*/
Page *SegMgr::GetSumPage(uint32_t segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
return fs_->GetMetaPage(GetSumBlock(&sbi, segno));
}
void SegMgr::WriteSumPage(f2fs_summary_block *sum_blk, block_t blk_addr) {
Page *page = fs_->GrabMetaPage(blk_addr);
void *kaddr = PageAddress(page);
memcpy(kaddr, sum_blk, kPageCacheSize);
#if 0 // porting needed
// set_page_dirty(page);
#endif
FlushDirtyMetaPage(fs_, page);
F2fsPutPage(page, 1);
}
uint32_t SegMgr::CheckPrefreeSegments(int ofs_unit, int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
uint64_t *prefree_segmap = dirty_i->dirty_segmap[static_cast<int>(DirtyType::kPre)];
uint32_t segno, next_segno, i;
int ofs = 0;
/*
* If there are not enough reserved sections,
* we should not reuse prefree segments.
*/
if (HasNotEnoughFreeSecs())
return kNullSegNo;
/*
* A NODE page should not reuse a prefree segment,
* since that information is used for SPOR (sudden power-off recovery).
*/
if (IsNodeSeg(type))
return kNullSegNo;
next:
segno = find_next_bit_le(prefree_segmap, TotalSegs(&sbi), ofs++);
ofs = ((segno / ofs_unit) * ofs_unit) + ofs_unit;
if (segno < TotalSegs(&sbi)) {
/* skip intermediate segments in a section */
if (segno % ofs_unit)
goto next;
/* skip if whole section is not prefree */
next_segno = find_next_zero_bit(prefree_segmap, TotalSegs(&sbi), segno + 1);
if (next_segno - segno < static_cast<uint32_t>(ofs_unit))
goto next;
/* skip if whole section was not free at the last checkpoint */
for (i = 0; i < static_cast<uint32_t>(ofs_unit); i++) {
if (GetSegEntry(segno)->ckpt_valid_blocks)
goto next;
}
return segno;
}
return kNullSegNo;
}
/**
* Finds a new segment from the free segment bitmap in the right order.
* This function must succeed; otherwise it asserts.
*/
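// The search begins at the section containing *newseg. When allocating to the
// left it walks toward lower section numbers, and it retries in another zone
// whenever the candidate zone is already occupied by one of the current
// segments, so that active logs spread across zones.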
void SegMgr::GetNewSegment(uint32_t *newseg, bool new_sec, int dir) {
f2fs_sb_info &sbi = fs_->SbInfo();
free_segmap_info *free_i = FREE_I(&sbi);
uint32_t total_secs = sbi.total_sections;
uint32_t segno, secno, zoneno;
uint32_t total_zones = sbi.total_sections / sbi.secs_per_zone;
uint32_t hint = *newseg / sbi.segs_per_sec;
uint32_t old_zoneno = GetZoneNoFromSegNo(&sbi, *newseg);
uint32_t left_start = hint;
bool init = true;
int go_left = 0;
int i;
WriteLock(&free_i->segmap_lock);
if (!new_sec && ((*newseg + 1) % sbi.segs_per_sec)) {
segno = find_next_zero_bit(free_i->free_segmap, TotalSegs(&sbi), *newseg + 1);
if (segno < TotalSegs(&sbi))
goto got_it;
}
find_other_zone:
secno = find_next_zero_bit(free_i->free_secmap, total_secs, hint);
if (secno >= total_secs) {
if (dir == static_cast<int>(AllocDirection::kAllocRight)) {
secno = find_next_zero_bit(free_i->free_secmap, total_secs, 0);
ZX_ASSERT(!(secno >= total_secs));
} else {
go_left = 1;
left_start = hint - 1;
}
}
if (go_left == 0)
goto skip_left;
while (test_bit(left_start, free_i->free_secmap)) {
if (left_start > 0) {
left_start--;
continue;
}
left_start = find_next_zero_bit(free_i->free_secmap, total_secs, 0);
ZX_ASSERT(!(left_start >= total_secs));
break;
}
secno = left_start;
skip_left:
hint = secno;
segno = secno * sbi.segs_per_sec;
zoneno = secno / sbi.secs_per_zone;
/* give up on finding another zone */
if (!init)
goto got_it;
if (sbi.secs_per_zone == 1)
goto got_it;
if (zoneno == old_zoneno)
goto got_it;
if (dir == static_cast<int>(AllocDirection::kAllocLeft)) {
if (!go_left && zoneno + 1 >= total_zones)
goto got_it;
if (go_left && zoneno == 0)
goto got_it;
}
for (i = 0; i < NR_CURSEG_TYPE; i++) {
if (CURSEG_I(&sbi, i)->zone == zoneno)
break;
}
if (i < NR_CURSEG_TYPE) {
/* zone is in user, try another */
if (go_left) {
hint = zoneno * sbi.secs_per_zone - 1;
} else if (zoneno + 1 >= total_zones) {
hint = 0;
} else {
hint = (zoneno + 1) * sbi.secs_per_zone;
}
init = false;
goto find_other_zone;
}
got_it:
/* set it as dirty segment in free segmap */
ZX_ASSERT(!test_bit(segno, free_i->free_segmap));
__SetInuse(segno);
*newseg = segno;
WriteUnlock(&free_i->segmap_lock);
}
void SegMgr::ResetCurseg(int type, int modified) {
f2fs_sb_info &sbi = fs_->SbInfo();
curseg_info *curseg = CURSEG_I(&sbi, type);
summary_footer *sum_footer;
curseg->segno = curseg->next_segno;
curseg->zone = GetZoneNoFromSegNo(&sbi, curseg->segno);
curseg->next_blkoff = 0;
curseg->next_segno = kNullSegNo;
sum_footer = &(curseg->sum_blk->footer);
memset(sum_footer, 0, sizeof(summary_footer));
if (IsDataSeg(type))
SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
if (IsNodeSeg(type))
SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
__SetSitEntryType(type, curseg->segno, modified);
}
/**
* Allocate a current working segment.
* This function always allocates a free segment in LFS manner.
*/
void SegMgr::NewCurseg(int type, bool new_sec) {
f2fs_sb_info &sbi = fs_->SbInfo();
curseg_info *curseg = CURSEG_I(&sbi, type);
uint32_t segno = curseg->segno;
int dir = static_cast<int>(AllocDirection::kAllocLeft);
WriteSumPage(curseg->sum_blk, GetSumBlock(&sbi, curseg->segno));
if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
dir = static_cast<int>(AllocDirection::kAllocRight);
if (test_opt(&sbi, NOHEAP))
dir = static_cast<int>(AllocDirection::kAllocRight);
GetNewSegment(&segno, new_sec, dir);
curseg->next_segno = segno;
ResetCurseg(type, 1);
curseg->alloc_type = static_cast<uint8_t>(AllocMode::kLFS);
}
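// Finds the first block offset at or after |start| that is free in both the
// checkpoint-time and current valid bitmaps, i.e. a block that SSR may
// safely reuse.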
void SegMgr::__NextFreeBlkoff(curseg_info *seg, block_t start) {
f2fs_sb_info &sbi = fs_->SbInfo();
seg_entry *se = GetSegEntry(seg->segno);
block_t ofs;
for (ofs = start; ofs < sbi.blocks_per_seg; ofs++) {
if (!f2fs_test_bit(ofs, reinterpret_cast<char *>(se->ckpt_valid_map)) &&
!f2fs_test_bit(ofs, reinterpret_cast<char *>(se->cur_valid_map)))
break;
}
seg->next_blkoff = ofs;
}
/**
* If a segment is written in LFS manner, the next block offset is simply
* the current block offset plus one. However, if a segment is written in
* SSR manner, the next block offset is obtained by calling __NextFreeBlkoff().
*/
void SegMgr::__RefreshNextBlkoff(curseg_info *seg) {
if (seg->alloc_type == static_cast<uint8_t>(AllocMode::kSSR)) {
__NextFreeBlkoff(seg, seg->next_blkoff + 1);
} else {
seg->next_blkoff++;
}
}
/**
* This function always allocates a used segment (from the dirty seglist) in
* SSR manner, so it must recover the existing segment information of
* valid blocks.
*/
void SegMgr::ChangeCurseg(int type, bool reuse) {
f2fs_sb_info &sbi = fs_->SbInfo();
dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
curseg_info *curseg = CURSEG_I(&sbi, type);
uint32_t new_segno = curseg->next_segno;
f2fs_summary_block *sum_node;
Page *sum_page;
WriteSumPage(curseg->sum_blk, GetSumBlock(&sbi, curseg->segno));
__SetTestAndInuse(new_segno);
mtx_lock(&dirty_i->seglist_lock);
__RemoveDirtySegment(new_segno, DirtyType::kPre);
__RemoveDirtySegment(new_segno, DirtyType::kDirty);
mtx_unlock(&dirty_i->seglist_lock);
ResetCurseg(type, 1);
curseg->alloc_type = static_cast<uint8_t>(AllocMode::kLFS);
__NextFreeBlkoff(curseg, 0);
if (reuse) {
sum_page = GetSumPage(new_segno);
sum_node = static_cast<f2fs_summary_block *>(PageAddress(sum_page));
memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
F2fsPutPage(sum_page, 1);
}
}
/*
* Flushes out the current segment and replaces it with a new one.
* This function must succeed; otherwise it asserts.
*/
void SegMgr::AllocateSegmentByDefault(int type, bool force) {
f2fs_sb_info &sbi = fs_->SbInfo();
curseg_info *curseg = CURSEG_I(&sbi, type);
// uint32_t ofs_unit;
if (force) {
NewCurseg(type, true);
goto out;
}
// TODO: BUG (we can get next_segno from prefree_segment only after checkpoint)
// ofs_unit = NeedSSR() ? 1 : sbi.segs_per_sec;
// curseg->next_segno = CheckPrefreeSegments(ofs_unit, type);
// if (curseg->next_segno != kNullSegNo)
// ChangeCurseg(type, false);
// else
if (type == CURSEG_WARM_NODE) {
NewCurseg(type, false);
} else if (false) {
#if 0 // porting needed
// TODO: IMPL (SSR)
//} else if (NeedSSR() && GetSsrSegment(type)) {
#endif
ChangeCurseg(type, true);
} else {
NewCurseg(type, false);
}
out:
sbi.segment_count[curseg->alloc_type]++;
#ifdef F2FS_BU_DEBUG
std::cout << "SegMgr::AllocateSegmentByDefault, type=" << type
<< ", curseg->segno =" << curseg->segno
<< ", FreeSections()=" << FreeSections()
<< ", PrefreeSegments()=" << PrefreeSegments()
<< ", DirtySegments()=" << DirtySegments()
<< ", TotalSegs=" << TotalSegs(&sbi)
<< ", Utilization()=" << Utilization()
<< std::endl;
#endif
}
void SegMgr::AllocateNewSegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
curseg_info *curseg;
uint32_t old_curseg;
int i;
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
curseg = CURSEG_I(&sbi, i);
old_curseg = curseg->segno;
SIT_I(&sbi)->s_ops->allocate_segment(&sbi, i, true);
LocateDirtySegment(old_curseg);
}
}
#if 0 // porting needed
/*
const segment_allocation default_salloc_ops = {
.allocate_segment = AllocateSegmentByDefault,
};
*/
#endif
#if 0 // porting needed (bio)
void SegMgr::F2fsEndIoWrite(bio *bio, int err) {
// const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
// bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
// bio_private *p = bio->bi_private;
// do {
// page *page = bvec->bv_page;
// if (--bvec >= bio->bi_io_vec)
// prefetchw(&bvec->bv_page->flags);
// if (!uptodate) {
// SetPageError(page);
// if (page->mapping)
// set_bit(AS_EIO, &page->mapping->flags);
// p->sbi->ckpt->ckpt_flags |= CP_ERROR_FLAG;
// set_page_dirty(page);
// }
// end_page_writeback(page);
// dec_page_count(p->sbi, F2FS_WRITEBACK);
// } while (bvec >= bio->bi_io_vec);
// if (p->is_sync)
// complete(p->wait);
// kfree(p);
// bio_put(bio);
}
#endif
#if 0 // porting needed (bio)
bio *SegMgr::F2fsBioAlloc(block_device *bdev, sector_t first_sector, int nr_vecs,
gfp_t gfp_flags) {
// bio *bio;
// repeat:
// /* allocate new bio */
// bio = bio_alloc(gfp_flags, nr_vecs);
// if (bio == NULL && (current->flags & PF_MEMALLOC)) {
// while (!bio && (nr_vecs /= 2))
// bio = bio_alloc(gfp_flags, nr_vecs);
// }
// if (bio) {
// bio->bi_bdev = bdev;
// bio->bi_sector = first_sector;
// retry:
// bio->bi_private = kmalloc(sizeof(bio_private),
// GFP_NOFS | __GFP_HIGH);
// if (!bio->bi_private) {
// cond_resched();
// goto retry;
// }
// }
// if (bio == NULL) {
// cond_resched();
// goto repeat;
// }
// return bio;
return nullptr;
}
#endif
#if 0 // porting needed (bio)
void SegMgr::DoSubmitBio(enum page_type type, bool sync) {
// int rw = sync ? kWriteSync : kWrite;
// enum page_type btype = type > META ? META : type;
// if (type >= META_FLUSH)
// rw = kWriteFlushFua;
// if (sbi->bio[btype]) {
// bio_private *p = sbi->bio[btype]->bi_private;
// p->sbi = sbi;
// sbi->bio[btype]->bi_end_io = f2fs_end_io_write;
// if (type == META_FLUSH) {
// DECLARE_COMPLETION_ONSTACK(wait);
// p->is_sync = true;
// p->wait = &wait;
// submit_bio(rw, sbi->bio[btype]);
// wait_for_completion(&wait);
// } else {
// p->is_sync = false;
// submit_bio(rw, sbi->bio[btype]);
// }
// sbi->bio[btype] = NULL;
// }
}
#endif
#if 0 // porting needed (bio)
void SegMgr::F2fsSubmitBio(enum page_type type, bool sync) {
// down_write(&sbi->bio_sem);
// DoSubmitBio(type, sync);
// up_write(&sbi->bio_sem);
}
#endif
void SegMgr::SubmitWritePage(Page *page, block_t blk_addr, enum page_type type) {
zx_status_t ret = fs_->bc_->Writeblk(blk_addr, page->data);
if (ret) {
std::cout << "SubmitWritePage error " << ret << std::endl;
}
#if 0 // porting needed (bio)
// fs_->bc_->Sync();
// block_device *bdev = sbi->sb->s_bdev;
// verify_block_addr(sbi, blk_addr);
// down_write(&sbi->bio_sem);
// inc_page_count(sbi, F2FS_WRITEBACK);
// if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
// do_submit_bio(sbi, type, false);
// alloc_new:
// if (sbi->bio[type] == NULL)
// sbi->bio[type] = f2fs_bio_alloc(bdev,
// blk_addr << (sbi->log_blocksize - 9),
// bio_get_nr_vecs(bdev), GFP_NOFS | __GFP_HIGH);
// if (bio_add_page(sbi->bio[type], page, kPageCacheSize, 0) <
// kPageCacheSize) {
// do_submit_bio(sbi, type, false);
// goto alloc_new;
// }
// sbi->last_block_in_bio[type] = blk_addr;
// up_write(&sbi->bio_sem);
#endif
}
bool SegMgr::__HasCursegSpace(int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
curseg_info *curseg = CURSEG_I(&sbi, type);
if (curseg->next_blkoff < sbi.blocks_per_seg) {
return true;
}
return false;
}
int SegMgr::__GetSegmentType2(Page *page, enum page_type p_type) {
if (p_type == DATA) {
return CURSEG_HOT_DATA;
} else {
return CURSEG_HOT_NODE;
}
}
int SegMgr::__GetSegmentType4(Page *page, enum page_type p_type) {
if (p_type == DATA) {
VnodeF2fs *vnode = static_cast<f2fs::VnodeF2fs *>(page->host);
if (S_ISDIR(vnode->i_mode_)) {
return CURSEG_HOT_DATA;
} else {
return CURSEG_COLD_DATA;
}
} else {
if (fs_->Nodemgr().IS_DNODE(page) && !NodeMgr::IsColdNode(page)) {
return CURSEG_HOT_NODE;
} else {
return CURSEG_COLD_NODE;
}
}
return 0;
}
int SegMgr::__GetSegmentType6(Page *page, enum page_type p_type) {
if (p_type == DATA) {
VnodeF2fs *vnode = static_cast<f2fs::VnodeF2fs *>(page->host);
if (S_ISDIR(vnode->i_mode_)) {
return CURSEG_HOT_DATA;
} else if (NodeMgr::IsColdData(page) || NodeMgr::IsColdFile(vnode)) {
return CURSEG_COLD_DATA;
} else {
return CURSEG_WARM_DATA;
}
} else {
if (fs_->Nodemgr().IS_DNODE(page)) {
return NodeMgr::IsColdNode(page) ? CURSEG_WARM_NODE : CURSEG_HOT_NODE;
} else {
return CURSEG_COLD_NODE;
}
}
return 0;
}
int SegMgr::__GetSegmentType(Page *page, enum page_type p_type) {
f2fs_sb_info &sbi = fs_->SbInfo();
switch (sbi.active_logs) {
case 2:
return __GetSegmentType2(page, p_type);
case 4:
return __GetSegmentType4(page, p_type);
case 6:
return __GetSegmentType6(page, p_type);
default:
ZX_ASSERT(0);
}
}
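// Allocates the next free block in the log selected by __GetSegmentType(),
// records the summary entry under curseg_mutex, refreshes the SIT accounting,
// replaces the current segment once it is exhausted, and finally submits the
// page to the chosen block address.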
void SegMgr::DoWritePage(Page *page, block_t old_blkaddr, block_t *new_blkaddr,
f2fs_summary *sum, enum page_type p_type) {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
curseg_info *curseg;
uint32_t old_cursegno;
int type;
type = __GetSegmentType(page, p_type);
curseg = CURSEG_I(&sbi, type);
mtx_lock(&curseg->curseg_mutex);
*new_blkaddr = NextFreeBlkAddr(&sbi, curseg);
old_cursegno = curseg->segno;
/*
* __add_sum_entry should be resided under the curseg_mutex
* because, this function updates a summary entry in the
* current summary block.
*/
__AddSumEntry(type, sum, curseg->next_blkoff);
mtx_lock(&sit_i->sentry_lock);
__RefreshNextBlkoff(curseg);
sbi.block_count[curseg->alloc_type]++;
/*
* SIT information should be updated before segment allocation,
* since SSR needs latest valid block information.
*/
RefreshSitEntry(old_blkaddr, *new_blkaddr);
if (!__HasCursegSpace(type)) {
#if 0 // porting needed
// sit_i->s_ops->allocate_segment(&sbi, type, false);
#endif
AllocateSegmentByDefault(type, false);
}
LocateDirtySegment(old_cursegno);
LocateDirtySegment(GetSegNo(&sbi, old_blkaddr));
mtx_unlock(&sit_i->sentry_lock);
if (p_type == NODE)
fs_->Nodemgr().FillNodeFooterBlkaddr(page, NextFreeBlkAddr(&sbi, curseg));
/* writeout dirty page into bdev */
SubmitWritePage(page, *new_blkaddr, p_type);
mtx_unlock(&curseg->curseg_mutex);
}
zx_status_t SegMgr::WriteMetaPage(Page *page, WritebackControl *wbc) {
#if 0 // porting needed
// if (wbc && wbc->for_reclaim)
// return kAopWritepageActivate;
#endif
SetPageWriteback(page);
SubmitWritePage(page, page->index, META);
return ZX_OK;
}
void SegMgr::WriteNodePage(Page *page, uint32_t nid, block_t old_blkaddr,
block_t *new_blkaddr) {
f2fs_summary sum;
SetSummary(&sum, nid, 0, 0);
DoWritePage(page, old_blkaddr, new_blkaddr, &sum, NODE);
}
void SegMgr::WriteDataPage(VnodeF2fs *vnode, Page *page, dnode_of_data *dn,
block_t old_blkaddr, block_t *new_blkaddr) {
f2fs_summary sum;
node_info ni;
ZX_ASSERT(old_blkaddr != NULL_ADDR);
fs_->Nodemgr().GetNodeInfo(dn->nid, &ni);
SetSummary(&sum, dn->nid, dn->ofs_in_node, ni.version);
DoWritePage(page, old_blkaddr, new_blkaddr, &sum, DATA);
}
void SegMgr::RewriteDataPage(Page *page, block_t old_blk_addr) {
SubmitWritePage(page, old_blk_addr, DATA);
}
void SegMgr::RecoverDataPage(Page *page, f2fs_summary *sum, block_t old_blkaddr,
block_t new_blkaddr) {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
curseg_info *curseg;
uint32_t segno, old_cursegno;
seg_entry *se;
int type;
segno = GetSegNo(&sbi, new_blkaddr);
se = GetSegEntry(segno);
type = se->type;
if (se->valid_blocks == 0 && !IsCurSeg(&sbi, segno)) {
if (old_blkaddr == NULL_ADDR) {
type = CURSEG_COLD_DATA;
} else {
type = CURSEG_WARM_DATA;
}
}
curseg = CURSEG_I(&sbi, type);
mtx_lock(&curseg->curseg_mutex);
mtx_lock(&sit_i->sentry_lock);
old_cursegno = curseg->segno;
/* change the current segment */
if (segno != curseg->segno) {
curseg->next_segno = segno;
ChangeCurseg(type, true);
}
curseg->next_blkoff = GetSegOffFromSeg0(&sbi, new_blkaddr) & (sbi.blocks_per_seg - 1);
__AddSumEntry(type, sum, curseg->next_blkoff);
RefreshSitEntry(old_blkaddr, new_blkaddr);
LocateDirtySegment(old_cursegno);
LocateDirtySegment(GetSegNo(&sbi, old_blkaddr));
mtx_unlock(&sit_i->sentry_lock);
mtx_unlock(&curseg->curseg_mutex);
}
void SegMgr::RewriteNodePage(Page *page, f2fs_summary *sum, block_t old_blkaddr,
block_t new_blkaddr) {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
int type = CURSEG_WARM_NODE;
curseg_info *curseg;
uint32_t segno, old_cursegno;
block_t next_blkaddr = NodeMgr::NextBlkaddrOfNode(page);
uint32_t next_segno = GetSegNo(&sbi, next_blkaddr);
curseg = CURSEG_I(&sbi, type);
mtx_lock(&curseg->curseg_mutex);
mtx_lock(&sit_i->sentry_lock);
segno = GetSegNo(&sbi, new_blkaddr);
old_cursegno = curseg->segno;
/* change the current segment */
if (segno != curseg->segno) {
curseg->next_segno = segno;
ChangeCurseg(type, true);
}
curseg->next_blkoff = GetSegOffFromSeg0(&sbi, new_blkaddr) & (sbi.blocks_per_seg - 1);
__AddSumEntry(type, sum, curseg->next_blkoff);
/* change the current log to the next block addr in advance */
if (next_segno != segno) {
curseg->next_segno = next_segno;
ChangeCurseg(type, true);
}
curseg->next_blkoff = GetSegOffFromSeg0(&sbi, next_blkaddr) & (sbi.blocks_per_seg - 1);
/* rewrite node page */
SetPageWriteback(page);
SubmitWritePage(page, new_blkaddr, NODE);
#if 0 // porting needed
F2fsSubmitBio(NODE, true);
#endif
RefreshSitEntry(old_blkaddr, new_blkaddr);
LocateDirtySegment(old_cursegno);
LocateDirtySegment(GetSegNo(&sbi, old_blkaddr));
mtx_unlock(&sit_i->sentry_lock);
mtx_unlock(&curseg->curseg_mutex);
}
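// Compacted summaries pack the NAT journal, the SIT journal, and the summary
// entries of the three data logs back to back into a few meta pages; this
// reads them back in the same order that WriteCompactedSummaries() emits them.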
int SegMgr::ReadCompactedSummaries() {
f2fs_sb_info &sbi = fs_->SbInfo();
f2fs_checkpoint *ckpt = F2FS_CKPT(&sbi);
curseg_info *seg_i;
uint8_t *kaddr;
Page *page;
block_t start;
int i, j, offset;
start = StartSumBlock();
page = fs_->GetMetaPage(start++);
kaddr = static_cast<uint8_t *>(PageAddress(page));
/* Step 1: restore nat cache */
seg_i = CURSEG_I(&sbi, CURSEG_HOT_DATA);
memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
/* Step 2: restore sit cache */
seg_i = CURSEG_I(&sbi, CURSEG_COLD_DATA);
memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
offset = 2 * SUM_JOURNAL_SIZE;
/* Step 3: restore summary entries */
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
uint16_t blk_off;
uint32_t segno;
seg_i = CURSEG_I(&sbi, i);
segno = LeToCpu(ckpt->cur_data_segno[i]);
blk_off = LeToCpu(ckpt->cur_data_blkoff[i]);
seg_i->next_segno = segno;
ResetCurseg(i, 0);
seg_i->alloc_type = ckpt->alloc_type[i];
seg_i->next_blkoff = blk_off;
if (seg_i->alloc_type == static_cast<uint8_t>(AllocMode::kLFS))
blk_off = sbi.blocks_per_seg;
for (j = 0; j < blk_off; j++) {
f2fs_summary *s;
s = reinterpret_cast<f2fs_summary *>(kaddr + offset);
seg_i->sum_blk->entries[j] = *s;
offset += SUMMARY_SIZE;
if (offset + SUMMARY_SIZE <= kPageCacheSize - SUM_FOOTER_SIZE)
continue;
F2fsPutPage(page, 1);
page = nullptr;
page = fs_->GetMetaPage(start++);
kaddr = static_cast<uint8_t *>(PageAddress(page));
offset = 0;
}
}
F2fsPutPage(page, 1);
return 0;
}
int SegMgr::ReadNormalSummaries(int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
f2fs_checkpoint *ckpt = F2FS_CKPT(&sbi);
f2fs_summary_block *sum;
curseg_info *curseg;
Page *new_page;
uint16_t blk_off;
uint32_t segno = 0;
block_t blk_addr = 0;
/* get segment number and block addr */
if (IsDataSeg(type)) {
segno = LeToCpu(ckpt->cur_data_segno[type]);
blk_off = LeToCpu(ckpt->cur_data_blkoff[type - CURSEG_HOT_DATA]);
if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) {
blk_addr = SumBlkAddr(NR_CURSEG_TYPE, type);
} else
blk_addr = SumBlkAddr(NR_CURSEG_DATA_TYPE, type);
} else {
segno = LeToCpu(ckpt->cur_node_segno[type - CURSEG_HOT_NODE]);
blk_off = LeToCpu(ckpt->cur_node_blkoff[type - CURSEG_HOT_NODE]);
if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) {
blk_addr = SumBlkAddr(NR_CURSEG_NODE_TYPE, type - CURSEG_HOT_NODE);
} else
blk_addr = GetSumBlock(&sbi, segno);
}
new_page = fs_->GetMetaPage(blk_addr);
sum = static_cast<f2fs_summary_block *>(PageAddress(new_page));
if (IsNodeSeg(type)) {
if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) {
f2fs_summary *ns = &sum->entries[0];
uint32_t i;
for (i = 0; i < sbi.blocks_per_seg; i++, ns++) {
ns->version = 0;
ns->ofs_in_node = 0;
}
} else {
if (NodeMgr::RestoreNodeSummary(fs_, segno, sum)) {
F2fsPutPage(new_page, 1);
return -EINVAL;
}
}
}
/* set uncompleted segment to curseg */
curseg = CURSEG_I(&sbi, type);
mtx_lock(&curseg->curseg_mutex);
memcpy(curseg->sum_blk, sum, kPageCacheSize);
curseg->next_segno = segno;
ResetCurseg(type, 0);
curseg->alloc_type = ckpt->alloc_type[type];
curseg->next_blkoff = blk_off;
mtx_unlock(&curseg->curseg_mutex);
F2fsPutPage(new_page, 1);
return 0;
}
zx_status_t SegMgr::RestoreCursegSummaries() {
f2fs_sb_info &sbi = fs_->SbInfo();
int type = CURSEG_HOT_DATA;
if (sbi.ckpt->ckpt_flags & CP_COMPACT_SUM_FLAG) {
/* restore for compacted data summary */
if (ReadCompactedSummaries())
return ZX_ERR_INVALID_ARGS;
type = CURSEG_HOT_NODE;
}
for (; type <= CURSEG_COLD_NODE; type++) {
if (ReadNormalSummaries(type))
return ZX_ERR_INVALID_ARGS;
}
return ZX_OK;
}
void SegMgr::WriteCompactedSummaries(block_t blkaddr) {
f2fs_sb_info &sbi = fs_->SbInfo();
Page *page;
uint8_t *kaddr;
f2fs_summary *summary;
curseg_info *seg_i;
int written_size = 0;
int i, j;
page = fs_->GrabMetaPage(blkaddr++);
kaddr = static_cast<uint8_t *>(PageAddress(page));
/* Step 1: write nat cache */
seg_i = CURSEG_I(&sbi, CURSEG_HOT_DATA);
memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
written_size += SUM_JOURNAL_SIZE;
/* Step 2: write sit cache */
seg_i = CURSEG_I(&sbi, CURSEG_COLD_DATA);
memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits, SUM_JOURNAL_SIZE);
written_size += SUM_JOURNAL_SIZE;
// set_page_dirty(page);
FlushDirtyMetaPage(fs_, page);
/* Step 3: write summary entries */
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
uint16_t blkoff;
seg_i = CURSEG_I(&sbi, i);
if (sbi.ckpt->alloc_type[i] == static_cast<uint8_t>(AllocMode::kLFS)) {
blkoff = sbi.blocks_per_seg;
} else {
blkoff = CursegBlkoff(i);
}
for (j = 0; j < blkoff; j++) {
if (!page) {
page = fs_->GrabMetaPage(blkaddr++);
kaddr = static_cast<uint8_t *>(PageAddress(page));
written_size = 0;
}
summary = reinterpret_cast<f2fs_summary *>(kaddr + written_size);
*summary = seg_i->sum_blk->entries[j];
written_size += SUMMARY_SIZE;
#if 0 // porting needed
// set_page_dirty(page);
#endif
FlushDirtyMetaPage(fs_, page);
if (written_size + SUMMARY_SIZE <= kPageCacheSize - SUM_FOOTER_SIZE)
continue;
F2fsPutPage(page, 1);
page = nullptr;
}
}
if (page)
F2fsPutPage(page, 1);
}
void SegMgr::WriteNormalSummaries(block_t blkaddr, int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
int i, end;
if (IsDataSeg(type)) {
end = type + NR_CURSEG_DATA_TYPE;
} else {
end = type + NR_CURSEG_NODE_TYPE;
}
for (i = type; i < end; i++) {
curseg_info *sum = CURSEG_I(&sbi, i);
mtx_lock(&sum->curseg_mutex);
WriteSumPage(sum->sum_blk, blkaddr + (i - type));
mtx_unlock(&sum->curseg_mutex);
}
}
void SegMgr::WriteDataSummaries(block_t start_blk) {
f2fs_sb_info &sbi = fs_->SbInfo();
if (sbi.ckpt->ckpt_flags & CP_COMPACT_SUM_FLAG) {
WriteCompactedSummaries(start_blk);
} else {
WriteNormalSummaries(start_blk, CURSEG_HOT_DATA);
}
}
void SegMgr::WriteNodeSummaries(block_t start_blk) {
f2fs_sb_info &sbi = fs_->SbInfo();
if (sbi.ckpt->ckpt_flags & CP_UMOUNT_FLAG)
WriteNormalSummaries(start_blk, CURSEG_HOT_NODE);
}
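// Looks up |val| (a nid for NAT_JOURNAL, a segno for SIT_JOURNAL) in the
// in-summary journal and returns its slot index. With |alloc| set, a new slot
// is appended when the value is absent and space remains; otherwise -1 is
// returned.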
int SegMgr::LookupJournalInCursum(f2fs_summary_block *sum, int type, uint32_t val,
int alloc) {
int i;
if (type == NAT_JOURNAL) {
for (i = 0; i < nats_in_cursum(sum); i++) {
if (LeToCpu(nid_in_journal(sum, i)) == val)
return i;
}
if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
return update_nats_in_cursum(sum, 1);
} else if (type == SIT_JOURNAL) {
for (i = 0; i < sits_in_cursum(sum); i++) {
if (LeToCpu(segno_in_journal(sum, i)) == val)
return i;
}
if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
return update_sits_in_cursum(sum, 1);
}
return -1;
}
Page *SegMgr::GetCurrentSitPage(uint32_t segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
uint32_t offset = SitBlockOffset(sit_i, segno);
block_t blk_addr = sit_i->sit_base_addr + offset;
CheckSegRange(segno);
/* calculate sit block address */
if (f2fs_test_bit(offset, sit_i->sit_bitmap))
blk_addr += sit_i->sit_blocks;
return fs_->GetMetaPage(blk_addr);
}
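// The SIT area keeps two copies of every SIT block. NextSitAddr() yields the
// address of the block in the other copy, and SetToNextSit() flips the bitmap
// bit so later lookups read the copy that is about to be written.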
Page *SegMgr::GetNextSitPage(uint32_t start) {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
Page *src_page, *dst_page;
pgoff_t src_off, dst_off;
void *src_addr, *dst_addr;
src_off = CurrentSitAddr(start);
dst_off = NextSitAddr(src_off);
/* get current sit block page without lock */
src_page = fs_->GetMetaPage(src_off);
dst_page = fs_->GrabMetaPage(dst_off);
ZX_ASSERT(!PageDirty(src_page));
src_addr = PageAddress(src_page);
dst_addr = PageAddress(dst_page);
memcpy(dst_addr, src_addr, kPageCacheSize);
#if 0 // porting needed
// set_page_dirty(dst_page);
#endif
F2fsPutPage(src_page, 1);
SetToNextSit(sit_i, start);
return dst_page;
}
bool SegMgr::FlushSitsInJournal() {
f2fs_sb_info &sbi = fs_->SbInfo();
curseg_info *curseg = CURSEG_I(&sbi, CURSEG_COLD_DATA);
f2fs_summary_block *sum = curseg->sum_blk;
int i;
/*
* If the journal area in the current summary is full of sit entries,
* all of them are flushed; otherwise they could not be replaced with
* newly hot sit entries.
*/
if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
uint32_t segno;
segno = LeToCpu(segno_in_journal(sum, i));
__MarkSitEntryDirty(segno);
}
update_sits_in_cursum(sum, -sits_in_cursum(sum));
return true;
}
return false;
}
/**
* CP calls this function, which flushes SIT entries including sit_journal,
* and moves prefree segs to free segs.
*/
void SegMgr::FlushSitEntries() {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
uint64_t *bitmap = sit_i->dirty_sentries_bitmap;
curseg_info *curseg = CURSEG_I(&sbi, CURSEG_COLD_DATA);
f2fs_summary_block *sum = curseg->sum_blk;
uint64_t nsegs = TotalSegs(&sbi);
Page *page = nullptr;
f2fs_sit_block *raw_sit = nullptr;
uint32_t start = 0, end = 0;
uint32_t segno = -1;
bool flushed;
mtx_lock(&curseg->curseg_mutex);
mtx_lock(&sit_i->sentry_lock);
/*
* "flushed" indicates whether sit entries in journal are flushed
* to the SIT area or not.
*/
flushed = FlushSitsInJournal();
while ((segno = find_next_bit_le(bitmap, nsegs, segno + 1)) < nsegs) {
seg_entry *se = GetSegEntry(segno);
int sit_offset, offset;
sit_offset = SitEntryOffset(sit_i, segno);
if (flushed)
goto to_sit_page;
offset = LookupJournalInCursum(sum, SIT_JOURNAL, segno, 1);
if (offset >= 0) {
segno_in_journal(sum, offset) = CpuToLe(segno);
SegInfoToRawSit(se, &sit_in_journal(sum, offset));
goto flush_done;
}
to_sit_page:
if (!page || (start > segno) || (segno > end)) {
if (page) {
// set_page_dirty(page, fs_);
FlushDirtyMetaPage(fs_, page);
F2fsPutPage(page, 1);
page = nullptr;
}
start = StartSegNo(sit_i, segno);
end = start + SIT_ENTRY_PER_BLOCK - 1;
/* read sit block that will be updated */
page = GetNextSitPage(start);
raw_sit = static_cast<f2fs_sit_block *>(PageAddress(page));
}
/* update entry in SIT block */
SegInfoToRawSit(se, &raw_sit->entries[sit_offset]);
flush_done:
__clear_bit(segno, bitmap);
sit_i->dirty_sentries--;
}
mtx_unlock(&sit_i->sentry_lock);
mtx_unlock(&curseg->curseg_mutex);
/* writeout last modified SIT block */
#if 0 // porting needed
// set_page_dirty(page, fs_);
#endif
FlushDirtyMetaPage(fs_, page);
F2fsPutPage(page, 1);
SetPrefreeAsFreeSegments();
}
/*
* Build
*/
zx_status_t SegMgr::BuildSitInfo() {
f2fs_sb_info &sbi = fs_->SbInfo();
f2fs_super_block *raw_super = F2FS_RAW_SUPER(&sbi);
f2fs_checkpoint *ckpt = F2FS_CKPT(&sbi);
sit_info *sit_i;
uint32_t sit_segs, start;
char *src_bitmap, *dst_bitmap;
uint32_t bitmap_size;
/* allocate memory for SIT information */
sit_i = static_cast<sit_info *>(malloc(sizeof(sit_info)));
if (!sit_i)
return ZX_ERR_NO_MEMORY;
memset(sit_i, 0, sizeof(sit_info));
SM_I(&sbi)->sit_info = sit_i;
sit_i->sentries = static_cast<seg_entry *>(calloc(TotalSegs(&sbi), sizeof(seg_entry)));
if (!sit_i->sentries)
return ZX_ERR_NO_MEMORY;
bitmap_size = BitmapSize(TotalSegs(&sbi));
sit_i->dirty_sentries_bitmap = static_cast<uint64_t *>(malloc(bitmap_size));
if (!sit_i->dirty_sentries_bitmap)
return ZX_ERR_NO_MEMORY;
memset(sit_i->dirty_sentries_bitmap, 0, bitmap_size);
for (start = 0; start < TotalSegs(&sbi); start++) {
sit_i->sentries[start].cur_valid_map =
static_cast<uint8_t *>(malloc(SIT_VBLOCK_MAP_SIZE));
sit_i->sentries[start].ckpt_valid_map =
static_cast<uint8_t *>(malloc(SIT_VBLOCK_MAP_SIZE));
if (!sit_i->sentries[start].cur_valid_map || !sit_i->sentries[start].ckpt_valid_map)
return ZX_ERR_NO_MEMORY;
memset(sit_i->sentries[start].cur_valid_map, 0, SIT_VBLOCK_MAP_SIZE);
memset(sit_i->sentries[start].ckpt_valid_map, 0, SIT_VBLOCK_MAP_SIZE);
}
if (sbi.segs_per_sec > 1) {
sit_i->sec_entries =
static_cast<sec_entry *>(calloc(sbi.total_sections, sizeof(sec_entry)));
if (!sit_i->sec_entries)
return ZX_ERR_NO_MEMORY;
}
/* get information related with SIT */
sit_segs = LeToCpu(raw_super->segment_count_sit) >> 1;
/* set up the SIT bitmap from the checkpoint pack */
bitmap_size = __bitmap_size(&sbi, SIT_BITMAP);
src_bitmap = static_cast<char *>(__bitmap_ptr(&sbi, SIT_BITMAP));
dst_bitmap = static_cast<char *>(malloc(bitmap_size));
if (!dst_bitmap)
return ZX_ERR_NO_MEMORY;
memcpy(dst_bitmap, src_bitmap, bitmap_size);
#if 0 // porting needed
/* init SIT information */
// sit_i->s_ops = &default_salloc_ops;
#endif
auto cur_time = time(nullptr);
sit_i->sit_base_addr = LeToCpu(raw_super->sit_blkaddr);
sit_i->sit_blocks = sit_segs << sbi.log_blocks_per_seg;
sit_i->written_valid_blocks = LeToCpu(ckpt->valid_block_count);
sit_i->sit_bitmap = dst_bitmap;
sit_i->bitmap_size = bitmap_size;
sit_i->dirty_sentries = 0;
sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
sit_i->elapsed_time = LeToCpu(sbi.ckpt->elapsed_time);
sit_i->mounted_time = cur_time;
mtx_init(&sit_i->sentry_lock, mtx_plain);
return ZX_OK;
}
zx_status_t SegMgr::BuildFreeSegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
f2fs_sm_info *sm_info = SM_I(&sbi);
free_segmap_info *free_i;
uint32_t bitmap_size, sec_bitmap_size;
/* allocate memory for free segmap information */
free_i = static_cast<free_segmap_info *>(malloc(sizeof(free_segmap_info)));
if (!free_i)
return ZX_ERR_NO_MEMORY;
memset(free_i, 0, sizeof(free_segmap_info));
SM_I(&sbi)->free_info = free_i;
bitmap_size = BitmapSize(TotalSegs(&sbi));
free_i->free_segmap = static_cast<uint64_t *>(malloc(bitmap_size));
if (!free_i->free_segmap)
return ZX_ERR_NO_MEMORY;
sec_bitmap_size = BitmapSize(sbi.total_sections);
free_i->free_secmap = static_cast<uint64_t *>(malloc(sec_bitmap_size));
if (!free_i->free_secmap)
return ZX_ERR_NO_MEMORY;
/* set all segments as dirty temporarily */
memset(free_i->free_segmap, 0xff, bitmap_size);
memset(free_i->free_secmap, 0xff, sec_bitmap_size);
/* init free segmap information */
free_i->start_segno = GetSegNoFromSeg0(&sbi, sm_info->main_blkaddr);
free_i->free_segments = 0;
free_i->free_sections = 0;
RwlockInit(&free_i->segmap_lock);
return ZX_OK;
}
zx_status_t SegMgr::BuildCurseg() {
f2fs_sb_info &sbi = fs_->SbInfo();
curseg_info *array = nullptr;
int i;
array = static_cast<curseg_info *>(calloc(NR_CURSEG_TYPE, sizeof(*array)));
if (!array)
return ZX_ERR_NO_MEMORY;
SM_I(&sbi)->curseg_array = array;
for (i = 0; i < NR_CURSEG_TYPE; i++) {
mtx_init(&array[i].curseg_mutex, mtx_plain);
array[i].sum_blk = static_cast<f2fs_summary_block *>(malloc(kPageCacheSize));
if (!array[i].sum_blk)
return ZX_ERR_NO_MEMORY;
memset(array[i].sum_blk, 0, kPageCacheSize);
array[i].segno = kNullSegNo;
array[i].next_blkoff = 0;
}
return RestoreCursegSummaries();
}
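// For each segment, the SIT entry cached in the cold data log's journal takes
// precedence (it is the most recent); only on a journal miss is the entry
// read from the on-disk SIT block.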
void SegMgr::BuildSitEntries() {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
curseg_info *curseg = CURSEG_I(&sbi, CURSEG_COLD_DATA);
f2fs_summary_block *sum = curseg->sum_blk;
uint32_t start;
for (start = 0; start < TotalSegs(&sbi); start++) {
seg_entry *se = &sit_i->sentries[start];
f2fs_sit_block *sit_blk;
f2fs_sit_entry sit;
Page *page;
int i;
mtx_lock(&curseg->curseg_mutex);
for (i = 0; i < sits_in_cursum(sum); i++) {
if (LeToCpu(segno_in_journal(sum, i)) == start) {
sit = sit_in_journal(sum, i);
mtx_unlock(&curseg->curseg_mutex);
goto got_it;
}
}
mtx_unlock(&curseg->curseg_mutex);
page = GetCurrentSitPage(start);
sit_blk = static_cast<f2fs_sit_block *>(PageAddress(page));
sit = sit_blk->entries[SitEntryOffset(sit_i, start)];
F2fsPutPage(page, 1);
got_it:
CheckBlockCount(start, &sit);
SegInfoFromRawSit(se, &sit);
if (sbi.segs_per_sec > 1) {
sec_entry *e = GetSecEntry(start);
e->valid_blocks += se->valid_blocks;
}
}
}
void SegMgr::InitFreeSegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
uint32_t start;
int type;
for (start = 0; start < TotalSegs(&sbi); start++) {
seg_entry *sentry = GetSegEntry(start);
if (!sentry->valid_blocks)
__SetFree(start);
}
/* mark the current segments as in use */
for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
curseg_info *curseg_t = CURSEG_I(&sbi, type);
__SetTestAndInuse(curseg_t->segno);
}
}
void SegMgr::InitDirtySegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
free_segmap_info *free_i = FREE_I(&sbi);
uint32_t segno = 0, offset = 0;
uint16_t valid_blocks;
int full_block_cnt = 0, dirty_block_cnt = 0;
while (segno < TotalSegs(&sbi)) {
/* find dirty segment based on free segmap */
segno = FindNextInuse(free_i, TotalSegs(&sbi), offset);
if (segno >= TotalSegs(&sbi))
break;
offset = segno + 1;
valid_blocks = GetValidBlocks(segno, 0);
if (valid_blocks >= sbi.blocks_per_seg || !valid_blocks) {
full_block_cnt++;
continue;
}
mtx_lock(&dirty_i->seglist_lock);
__LocateDirtySegment(segno, DirtyType::kDirty);
dirty_block_cnt++;
mtx_unlock(&dirty_i->seglist_lock);
}
#ifdef F2FS_BU_DEBUG
std::cout << "SegMgr::InitDirtySegmap, full_block_cnt=" << full_block_cnt
<< ", dirty_block_cnt=" << dirty_block_cnt << std::endl;
#endif
}
zx_status_t SegMgr::InitVictimSegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
uint32_t bitmap_size = BitmapSize(TotalSegs(&sbi));
dirty_i->victim_segmap[static_cast<int>(GcType::kFgGc)] = static_cast<uint64_t *>(malloc(bitmap_size));
dirty_i->victim_segmap[static_cast<int>(GcType::kBgGc)] = static_cast<uint64_t *>(malloc(bitmap_size));
if (!dirty_i->victim_segmap[static_cast<int>(GcType::kFgGc)] || !dirty_i->victim_segmap[static_cast<int>(GcType::kBgGc)])
return ZX_ERR_NO_MEMORY;
memset(dirty_i->victim_segmap[static_cast<int>(GcType::kFgGc)], 0, bitmap_size);
memset(dirty_i->victim_segmap[static_cast<int>(GcType::kBgGc)], 0, bitmap_size);
return ZX_OK;
}
zx_status_t SegMgr::BuildDirtySegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
dirty_seglist_info *dirty_i;
uint32_t bitmap_size, i;
dirty_i = static_cast<dirty_seglist_info *>(malloc(sizeof(dirty_seglist_info)));
if (!dirty_i)
return ZX_ERR_NO_MEMORY;
memset(dirty_i, 0, sizeof(dirty_seglist_info));
SM_I(&sbi)->dirty_info = dirty_i;
mtx_init(&dirty_i->seglist_lock, mtx_plain);
bitmap_size = BitmapSize(TotalSegs(&sbi));
for (i = 0; i < static_cast<int>(DirtyType::kNrDirtytype); i++) {
dirty_i->dirty_segmap[i] = static_cast<uint64_t *>(malloc(bitmap_size));
if (!dirty_i->dirty_segmap[i])
return ZX_ERR_NO_MEMORY;
memset(dirty_i->dirty_segmap[i], 0, bitmap_size);
dirty_i->nr_dirty[i] = 0;
}
InitDirtySegmap();
return InitVictimSegmap();
}
/**
* Update min, max modified time for cost-benefit GC algorithm
*/
void SegMgr::InitMinMaxMtime() {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
uint32_t segno;
mtx_lock(&sit_i->sentry_lock);
sit_i->min_mtime = LLONG_MAX;
for (segno = 0; segno < TotalSegs(&sbi); segno += sbi.segs_per_sec) {
uint32_t i;
uint64_t mtime = 0;
for (i = 0; i < sbi.segs_per_sec; i++)
mtime += GetSegEntry(segno + i)->mtime;
mtime = DivU64(mtime, sbi.segs_per_sec);
if (sit_i->min_mtime > mtime)
sit_i->min_mtime = mtime;
}
sit_i->max_mtime = GetMtime();
mtx_unlock(&sit_i->sentry_lock);
}
zx_status_t SegMgr::BuildSegmentManager() {
f2fs_sb_info &sbi = fs_->SbInfo();
f2fs_super_block *raw_super = F2FS_RAW_SUPER(&sbi);
f2fs_checkpoint *ckpt = F2FS_CKPT(&sbi);
f2fs_sm_info *sm_info = nullptr;
zx_status_t err = 0;
sm_info = new f2fs_sm_info;
if (!sm_info)
return ZX_ERR_NO_MEMORY;
/* init sm info */
sbi.sm_info = sm_info;
list_initialize(&sm_info->wblist_head);
SpinLockInit(&sm_info->wblist_lock);
sm_info->seg0_blkaddr = LeToCpu(raw_super->segment0_blkaddr);
sm_info->main_blkaddr = LeToCpu(raw_super->main_blkaddr);
sm_info->segment_count = LeToCpu(raw_super->segment_count);
sm_info->reserved_segments = LeToCpu(ckpt->rsvd_segment_count);
sm_info->ovp_segments = LeToCpu(ckpt->overprov_segment_count);
sm_info->main_segments = LeToCpu(raw_super->segment_count_main);
sm_info->ssa_blkaddr = LeToCpu(raw_super->ssa_blkaddr);
err = BuildSitInfo();
if (err)
return err;
err = BuildFreeSegmap();
if (err)
return err;
err = BuildCurseg();
if (err)
return err;
/* reinit free segmap based on SIT */
BuildSitEntries();
InitFreeSegmap();
err = BuildDirtySegmap();
if (err)
return err;
#ifdef F2FS_BU_DEBUG
std::cout << "SegMgr::BuildSegmentManager(), TotalSegs(&sbi)=" << TotalSegs(&sbi)
<< std::endl;
std::cout << "SegMgr::BuildSegmentManager(), ReservedSections()=" << ReservedSections()
<< std::endl;
std::cout << "SegMgr::BuildSegmentManager(), OverprovisionSections()=" << OverprovisionSections()
<< std::endl;
#endif
InitMinMaxMtime();
return ZX_OK;
}
void SegMgr::DiscardDirtySegmap(DirtyType dirty_type) {
f2fs_sb_info &sbi = fs_->SbInfo();
dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
mtx_lock(&dirty_i->seglist_lock);
free(dirty_i->dirty_segmap[static_cast<int>(dirty_type)]);
dirty_i->nr_dirty[static_cast<int>(dirty_type)] = 0;
mtx_unlock(&dirty_i->seglist_lock);
}
void SegMgr::ResetVictimSegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
uint32_t bitmap_size = BitmapSize(TotalSegs(&sbi));
memset(DIRTY_I(&sbi)->victim_segmap[static_cast<int>(GcType::kFgGc)], 0, bitmap_size);
}
void SegMgr::DestroyVictimSegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
free(dirty_i->victim_segmap[static_cast<int>(GcType::kFgGc)]);
free(dirty_i->victim_segmap[static_cast<int>(GcType::kBgGc)]);
}
void SegMgr::DestroyDirtySegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
int i;
if (!dirty_i)
return;
/* discard pre-free/dirty segments list */
for (i = 0; i < static_cast<int>(DirtyType::kNrDirtytype); i++)
DiscardDirtySegmap(static_cast<DirtyType>(i));
DestroyVictimSegmap();
SM_I(&sbi)->dirty_info = nullptr;
free(dirty_i);
}
// TODO: destroy_curseg
void SegMgr::DestroyCurseg() {
f2fs_sb_info &sbi = fs_->SbInfo();
curseg_info *array = SM_I(&sbi)->curseg_array;
int i;
if (!array)
return;
SM_I(&sbi)->curseg_array = nullptr;
for (i = 0; i < NR_CURSEG_TYPE; i++)
free(array[i].sum_blk);
free(array);
}
void SegMgr::DestroyFreeSegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
free_segmap_info *free_i = SM_I(&sbi)->free_info;
if (!free_i)
return;
SM_I(&sbi)->free_info = nullptr;
free(free_i->free_segmap);
free(free_i->free_secmap);
free(free_i);
}
void SegMgr::DestroySitInfo() {
f2fs_sb_info &sbi = fs_->SbInfo();
sit_info *sit_i = SIT_I(&sbi);
uint32_t start;
if (!sit_i)
return;
if (sit_i->sentries) {
for (start = 0; start < TotalSegs(&sbi); start++) {
free(sit_i->sentries[start].cur_valid_map);
free(sit_i->sentries[start].ckpt_valid_map);
}
}
free(sit_i->sentries);
free(sit_i->sec_entries);
free(sit_i->dirty_sentries_bitmap);
SM_I(&sbi)->sit_info = nullptr;
free(sit_i->sit_bitmap);
free(sit_i);
}
void SegMgr::DestroySegmentManager() {
f2fs_sb_info &sbi = fs_->SbInfo();
f2fs_sm_info *sm_info = SM_I(&sbi);
DestroyDirtySegmap();
DestroyCurseg();
DestroyFreeSegmap();
DestroySitInfo();
sbi.sm_info = nullptr;
free(sm_info);
}
} // namespace f2fs