// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <string.h>
#include <sys/stat.h>
#include <new>
#include "f2fs.h"
namespace f2fs {
/*
* inline functions
*/
inline struct seg_entry *SegMgr::GetSegEntry(unsigned int segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
return &sit_i->sentries[segno];
}
inline struct sec_entry *SegMgr::GetSecEntry(unsigned int segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
return &sit_i->sec_entries[GET_SECNO(&sbi, segno)];
}
inline unsigned int SegMgr::GetValidBlocks(unsigned int segno, int section) {
/*
* In order to get # of valid blocks in a section instantly from many
* segments, f2fs manages two counting structures separately.
*/
if (section > 1)
return GetSecEntry(segno)->valid_blocks;
else
return GetSegEntry(segno)->valid_blocks;
}
inline void SegMgr::SegInfoFromRawSit(struct seg_entry *se, struct f2fs_sit_entry *rs) {
se->valid_blocks = GET_SIT_VBLOCKS(rs);
se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
se->type = GET_SIT_TYPE(rs);
se->mtime = le64_to_cpu(rs->mtime);
}
inline void SegMgr::SegInfoToRawSit(struct seg_entry *se, struct f2fs_sit_entry *rs) {
unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) | se->valid_blocks;
rs->vblocks = cpu_to_le16(raw_vblocks);
memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
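// Note: this intentionally copies the raw map back into the in-memory
// checkpoint map, so that ckpt_valid_map/ckpt_valid_blocks match what was
// just written to the on-disk SIT entry.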
memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
se->ckpt_valid_blocks = se->valid_blocks;
rs->mtime = cpu_to_le64(se->mtime);
}
inline unsigned int SegMgr::FindNextInuse(struct free_segmap_info *free_i, unsigned int max,
unsigned int segno) {
unsigned int ret;
read_lock(&free_i->segmap_lock);
ret = find_next_bit_le(free_i->free_segmap, max, segno);
read_unlock(&free_i->segmap_lock);
return ret;
}
inline void SegMgr::__SetFree(unsigned int segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct free_segmap_info *free_i = FREE_I(&sbi);
unsigned int secno = segno / sbi.segs_per_sec;
unsigned int start_segno = secno * sbi.segs_per_sec;
unsigned int next;
write_lock(&free_i->segmap_lock);
clear_bit(segno, free_i->free_segmap);
free_i->free_segments++;
next = find_next_bit_le(free_i->free_segmap, TOTAL_SEGS(&sbi), start_segno);
if (next >= start_segno + sbi.segs_per_sec) {
clear_bit(secno, free_i->free_secmap);
free_i->free_sections++;
}
write_unlock(&free_i->segmap_lock);
}
inline void SegMgr::__SetInuse(unsigned int segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct free_segmap_info *free_i = FREE_I(&sbi);
unsigned int secno = segno / sbi.segs_per_sec;
set_bit(segno, free_i->free_segmap);
free_i->free_segments--;
if (!test_and_set_bit(secno, free_i->free_secmap))
free_i->free_sections--;
}
inline void SegMgr::__SetTestAndFree(unsigned int segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct free_segmap_info *free_i = FREE_I(&sbi);
unsigned int secno = segno / sbi.segs_per_sec;
unsigned int start_segno = secno * sbi.segs_per_sec;
unsigned int next;
write_lock(&free_i->segmap_lock);
if (test_and_clear_bit(segno, free_i->free_segmap)) {
free_i->free_segments++;
next = find_next_bit_le(free_i->free_segmap, TOTAL_SEGS(&sbi), start_segno);
if (next >= start_segno + sbi.segs_per_sec) {
if (test_and_clear_bit(secno, free_i->free_secmap))
free_i->free_sections++;
}
}
write_unlock(&free_i->segmap_lock);
}
inline void SegMgr::__SetTestAndInuse(unsigned int segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct free_segmap_info *free_i = FREE_I(&sbi);
unsigned int secno = segno / sbi.segs_per_sec;
write_lock(&free_i->segmap_lock);
if (!test_and_set_bit(segno, free_i->free_segmap)) {
free_i->free_segments--;
if (!test_and_set_bit(secno, free_i->free_secmap))
free_i->free_sections--;
}
write_unlock(&free_i->segmap_lock);
}
void SegMgr::GetSitBitmap(void *dst_addr) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}
#if 0 // porting needed
inline block_t SegMgr::WrittenBlockCount() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
block_t vblocks;
mtx_lock(&sit_i->sentry_lock);
vblocks = sit_i->written_valid_blocks;
mtx_unlock(&sit_i->sentry_lock);
return vblocks;
}
#endif
unsigned int SegMgr::FreeSegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct free_segmap_info *free_i = FREE_I(&sbi);
unsigned int free_segs;
read_lock(&free_i->segmap_lock);
free_segs = free_i->free_segments;
read_unlock(&free_i->segmap_lock);
return free_segs;
}
inline int SegMgr::ReservedSegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
return SM_I(&sbi)->reserved_segments;
}
inline unsigned int SegMgr::FreeSections() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct free_segmap_info *free_i = FREE_I(&sbi);
unsigned int free_secs;
read_lock(&free_i->segmap_lock);
free_secs = free_i->free_sections;
read_unlock(&free_i->segmap_lock);
return free_secs;
}
inline unsigned int SegMgr::PrefreeSegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
return DIRTY_I(&sbi)->nr_dirty[PRE];
}
inline unsigned int SegMgr::DirtySegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
return DIRTY_I(&sbi)->nr_dirty[DIRTY_HOT_DATA] + DIRTY_I(&sbi)->nr_dirty[DIRTY_WARM_DATA] +
DIRTY_I(&sbi)->nr_dirty[DIRTY_COLD_DATA] + DIRTY_I(&sbi)->nr_dirty[DIRTY_HOT_NODE] +
DIRTY_I(&sbi)->nr_dirty[DIRTY_WARM_NODE] + DIRTY_I(&sbi)->nr_dirty[DIRTY_COLD_NODE];
}
inline int SegMgr::OverprovisionSegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
return SM_I(&sbi)->ovp_segments;
}
inline int SegMgr::OverprovisionSections() {
f2fs_sb_info &sbi = fs_->SbInfo();
return ((unsigned int)OverprovisionSegments()) / sbi.segs_per_sec;
}
inline int SegMgr::ReservedSections() {
f2fs_sb_info &sbi = fs_->SbInfo();
return ((unsigned int)ReservedSegments()) / sbi.segs_per_sec;
}
inline bool SegMgr::NeedSSR() { return (FreeSections() < (unsigned int)OverprovisionSections()); }
inline int SegMgr::GetSsrSegment(int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct curseg_info *curseg = CURSEG_I(&sbi, type);
return DIRTY_I(&sbi)->v_ops->get_victim(&sbi, &(curseg)->next_segno, BG_GC, type, SSR);
}
inline bool SegMgr::HasNotEnoughFreeSecs() {
return FreeSections() <= (unsigned int)ReservedSections();
}
inline int SegMgr::Utilization() {
f2fs_sb_info &sbi = fs_->SbInfo();
return (long int)valid_user_blocks(&sbi) * 100 / (long int)sbi.user_block_count;
}
/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy:
 * once fs utilization exceeds MIN_IPU_UTIL (in percent), f2fs writes data
 * back to its original location, as traditional file systems do.
 * Here the threshold is 50%. See NeedInplaceUpdate() below.
 */
#define MIN_IPU_UTIL 50
bool SegMgr::NeedInplaceUpdate(VnodeF2fs *vnode) {
if (S_ISDIR(vnode->i_mode))
return false;
if (/*NeedSSR() &&*/ Utilization() > MIN_IPU_UTIL)
return true;
return false;
}
unsigned int SegMgr::CursegSegno(int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct curseg_info *curseg = CURSEG_I(&sbi, type);
return curseg->segno;
}
unsigned char SegMgr::CursegAllocType(int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct curseg_info *curseg = CURSEG_I(&sbi, type);
return curseg->alloc_type;
}
inline unsigned short SegMgr::CursegBlkoff(int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct curseg_info *curseg = CURSEG_I(&sbi, type);
return curseg->next_blkoff;
}
inline void SegMgr::CheckSegRange(unsigned int segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
[[maybe_unused]] unsigned int end_segno = SM_I(&sbi)->segment_count - 1;
ZX_ASSERT(!(segno > end_segno));
}
#if 0 // porting needed
/*
* This function is used for only debugging.
* NOTE: In future, we have to remove this function.
*/
inline void SegMgr::VerifyBlockAddr(block_t blk_addr) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_sm_info *sm_info = SM_I(&sbi);
block_t total_blks = sm_info->segment_count << sbi.log_blocks_per_seg;
[[maybe_unused]] block_t start_addr = sm_info->seg0_blkaddr;
[[maybe_unused]] block_t end_addr = start_addr + total_blks - 1;
ZX_ASSERT(!(blk_addr < start_addr));
ZX_ASSERT(!(blk_addr > end_addr));
}
#endif
/*
 * A summary block is always treated as an invalid block.
 */
inline void SegMgr::CheckBlockCount(int segno, struct f2fs_sit_entry *raw_sit) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_sm_info *sm_info = SM_I(&sbi);
unsigned int end_segno = sm_info->segment_count - 1;
int valid_blocks = 0;
unsigned int i;
/* check segment usage */
ZX_ASSERT(!(GET_SIT_VBLOCKS(raw_sit) > sbi.blocks_per_seg));
/* check boundary of a given segment number */
ZX_ASSERT(!(segno > (int)end_segno));
/* check bitmap with valid block count */
for (i = 0; i < sbi.blocks_per_seg; i++)
if (f2fs_test_bit(i, (char *)raw_sit->valid_map))
valid_blocks++;
ZX_ASSERT(GET_SIT_VBLOCKS(raw_sit) == valid_blocks);
}
inline pgoff_t SegMgr::CurrentSitAddr(unsigned int start) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
unsigned int offset = SIT_BLOCK_OFFSET(sit_i, start);
block_t blk_addr = sit_i->sit_base_addr + offset;
CheckSegRange(start);
/* calculate sit block address */
if (f2fs_test_bit(offset, (char *)sit_i->sit_bitmap))
blk_addr += sit_i->sit_blocks;
return blk_addr;
}
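// The SIT area keeps two copies of each SIT block; sit_bitmap records which
// copy is current. NextSitAddr() returns the address of the other copy, and
// SetToNextSit() flips the bitmap bit so that copy becomes current, giving
// checkpoint-safe (shadow-paged) SIT updates.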
inline pgoff_t SegMgr::NextSitAddr(pgoff_t block_addr) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
block_addr -= sit_i->sit_base_addr;
if (block_addr < sit_i->sit_blocks)
block_addr += sit_i->sit_blocks;
else
block_addr -= sit_i->sit_blocks;
return block_addr + sit_i->sit_base_addr;
}
inline void SegMgr::SetToNextSit(struct sit_info *sit_i, unsigned int start) {
unsigned int block_off = SIT_BLOCK_OFFSET(sit_i, start);
if (f2fs_test_bit(block_off, sit_i->sit_bitmap))
f2fs_clear_bit(block_off, sit_i->sit_bitmap);
else
f2fs_set_bit(block_off, sit_i->sit_bitmap);
}
inline unsigned long long SegMgr::GetMtime() {
auto cur_time = time(nullptr);
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
return sit_i->elapsed_time + cur_time - sit_i->mounted_time;
}
inline void SegMgr::SetSummary(struct f2fs_summary *sum, nid_t nid, unsigned int ofs_in_node,
unsigned char version) {
sum->nid = cpu_to_le32(nid);
sum->ofs_in_node = cpu_to_le16(ofs_in_node);
sum->version = version;
}
inline block_t SegMgr::StartSumBlock() {
f2fs_sb_info &sbi = fs_->SbInfo();
return __start_cp_addr(&sbi) + le32_to_cpu(F2FS_CKPT(&sbi)->cp_pack_start_sum);
}
inline block_t SegMgr::SumBlkAddr(int base, int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
return __start_cp_addr(&sbi) + le32_to_cpu(F2FS_CKPT(&sbi)->cp_pack_total_block_count) -
(base + 1) + type;
}
/*
* functions
*/
SegMgr::SegMgr(F2fs *fs) : fs_(fs) {}
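// Decide whether a checkpoint flush is needed before further allocation:
// convert the dirty NODE and DENT page counts into worst-case section counts
// (rounding each up to a whole section), and require that many free sections
// beyond ReservedSections(). Dentry pages are weighted twice, presumably
// because flushing them dirties node blocks as well.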
int SegMgr::NeedToFlush() {
f2fs_sb_info &sbi = fs_->SbInfo();
unsigned int pages_per_sec = (1 << sbi.log_blocks_per_seg) * sbi.segs_per_sec;
int node_secs =
((get_pages(&sbi, F2FS_DIRTY_NODES) + pages_per_sec - 1) >> sbi.log_blocks_per_seg) /
sbi.segs_per_sec;
int dent_secs =
((get_pages(&sbi, F2FS_DIRTY_DENTS) + pages_per_sec - 1) >> sbi.log_blocks_per_seg) /
sbi.segs_per_sec;
if (sbi.por_doing)
return 0;
if (FreeSections() <= (unsigned int)(node_secs + 2 * dent_secs + ReservedSections()))
return 1;
return 0;
}
/**
* This function balances dirty node and dentry pages.
* In addition, it controls garbage collection.
*/
void SegMgr::F2fsBalanceFs() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct writeback_control wbc = {
#if 0 // porting needed
// .nr_to_write = LONG_MAX,
// .sync_mode = WB_SYNC_ALL,
// .for_reclaim = 0,
#endif
};
if (sbi.por_doing)
return;
/*
 * Write a checkpoint while there are many dirty node pages and still
 * enough free segments to do so. After that, do GC.
 */
if (NeedToFlush()) {
fs_->SyncDirtyDirInodes();
fs_->Nodemgr().SyncNodePages(0, &wbc);
}
// TODO: IMPL GC
if (HasNotEnoughFreeSecs() && PrefreeSegments()) {
#if 0 // porting needed
// mtx_lock(&sbi.gc_mutex);
// f2fs_gc(&sbi, 1);
#endif
fs_->WriteCheckpoint(false, false);
}
}
void SegMgr::__LocateDirtySegment(unsigned int segno, enum dirty_type dirty_type) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
/* need not be added */
if (IS_CURSEG(&sbi, segno))
return;
if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
dirty_i->nr_dirty[dirty_type]++;
if (dirty_type == DIRTY) {
struct seg_entry *sentry = GetSegEntry(segno);
dirty_type = static_cast<enum dirty_type>(sentry->type);
if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
dirty_i->nr_dirty[dirty_type]++;
}
}
void SegMgr::__RemoveDirtySegment(unsigned int segno, enum dirty_type dirty_type) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
dirty_i->nr_dirty[dirty_type]--;
if (dirty_type == DIRTY) {
struct seg_entry *sentry = GetSegEntry(segno);
dirty_type = static_cast<enum dirty_type>(sentry->type);
if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
dirty_i->nr_dirty[dirty_type]--;
clear_bit(segno, dirty_i->victim_segmap[FG_GC]);
clear_bit(segno, dirty_i->victim_segmap[BG_GC]);
}
}
/**
 * This must not fail with an error such as -ENOMEM; adding a dirty entry
 * to the seglist is not a critical operation.
 * If the given segment is one of the current working segments, it is not added.
 */
void SegMgr::LocateDirtySegment(unsigned int segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
unsigned short valid_blocks;
if (segno == NULL_SEGNO || IS_CURSEG(&sbi, segno))
return;
mtx_lock(&dirty_i->seglist_lock);
valid_blocks = GetValidBlocks(segno, 0);
if (valid_blocks == 0) {
__LocateDirtySegment(segno, PRE);
__RemoveDirtySegment(segno, DIRTY);
} else if (valid_blocks < sbi.blocks_per_seg) {
__LocateDirtySegment(segno, DIRTY);
} else {
/* Recovery routine with SSR needs this */
__RemoveDirtySegment(segno, DIRTY);
}
mtx_unlock(&dirty_i->seglist_lock);
return;
}
/**
 * ClearPrefreeSegments() should be called after the checkpoint is done.
 */
void SegMgr::SetPrefreeAsFreeSegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
unsigned int segno, offset = 0;
unsigned int total_segs = TOTAL_SEGS(&sbi);
mtx_lock(&dirty_i->seglist_lock);
while (true) {
segno = find_next_bit_le(dirty_i->dirty_segmap[PRE], total_segs, offset);
if (segno >= total_segs)
break;
__SetTestAndFree(segno);
offset = segno + 1;
}
mtx_unlock(&dirty_i->seglist_lock);
}
void SegMgr::ClearPrefreeSegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
unsigned int segno, offset = 0;
unsigned int total_segs = TOTAL_SEGS(&sbi);
mtx_lock(&dirty_i->seglist_lock);
while (true) {
segno = find_next_bit_le(dirty_i->dirty_segmap[PRE], total_segs, offset);
if (segno >= total_segs)
break;
offset = segno + 1;
if (test_and_clear_bit(segno, dirty_i->dirty_segmap[PRE]))
dirty_i->nr_dirty[PRE]--;
#if 0 // porting needed (Trim)
/* Let's use trim */
// if (test_opt(sbi, DISCARD))
// blkdev_issue_discard(sbi->sb->s_bdev,
// START_BLOCK(sbi, segno) <<
// sbi->log_sectors_per_block,
// 1 << (sbi->log_sectors_per_block +
// sbi->log_blocks_per_seg),
// GFP_NOFS, 0);
#endif
}
mtx_unlock(&dirty_i->seglist_lock);
}
void SegMgr::__MarkSitEntryDirty(unsigned int segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
if (!test_and_set_bit_le(segno, sit_i->dirty_sentries_bitmap))
sit_i->dirty_sentries++;
}
void SegMgr::__SetSitEntryType(int type, unsigned int segno, int modified) {
struct seg_entry *se = GetSegEntry(segno);
se->type = type;
if (modified)
__MarkSitEntryDirty(segno);
}
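// Apply a validity delta for the block at blkaddr: del is +1 when a block
// becomes valid and -1 when it is invalidated. This updates the segment's
// valid-block count and bitmap, its mtime, the per-section count, and the
// running total to be written at the next checkpoint, and marks the SIT
// entry dirty.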
void SegMgr::UpdateSitEntry(block_t blkaddr, int del) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct seg_entry *se;
unsigned int segno, offset;
long int new_vblocks;
segno = GET_SEGNO(&sbi, blkaddr);
se = GetSegEntry(segno);
new_vblocks = se->valid_blocks + del;
offset = GET_SEGOFF_FROM_SEG0(&sbi, blkaddr) & (sbi.blocks_per_seg - 1);
ZX_ASSERT(!((new_vblocks >> (sizeof(unsigned short) << 3) || (new_vblocks > sbi.blocks_per_seg))));
se->valid_blocks = new_vblocks;
se->mtime = GetMtime();
SIT_I(&sbi)->max_mtime = se->mtime;
/* Update valid block bitmap */
if (del > 0) {
if (f2fs_set_bit(offset, reinterpret_cast<char *>(se->cur_valid_map)))
ZX_ASSERT(0);
} else {
if (!f2fs_clear_bit(offset, reinterpret_cast<char *>(se->cur_valid_map)))
ZX_ASSERT(0);
}
if (!f2fs_test_bit(offset, reinterpret_cast<char *>(se->ckpt_valid_map)))
se->ckpt_valid_blocks += del;
__MarkSitEntryDirty(segno);
/* update total number of valid blocks to be written in ckpt area */
SIT_I(&sbi)->written_valid_blocks += del;
if (sbi.segs_per_sec > 1)
GetSecEntry(segno)->valid_blocks += del;
}
void SegMgr::RefreshSitEntry(block_t old_blkaddr, block_t new_blkaddr) {
f2fs_sb_info &sbi = fs_->SbInfo();
UpdateSitEntry(new_blkaddr, 1);
if (GET_SEGNO(&sbi, old_blkaddr) != NULL_SEGNO)
UpdateSitEntry(old_blkaddr, -1);
}
void SegMgr::InvalidateBlocks(block_t addr) {
f2fs_sb_info &sbi = fs_->SbInfo();
unsigned int segno = GET_SEGNO(&sbi, addr);
struct sit_info *sit_i = SIT_I(&sbi);
ZX_ASSERT(addr != NULL_ADDR);
if (addr == NEW_ADDR)
return;
/* add it into sit main buffer */
mtx_lock(&sit_i->sentry_lock);
UpdateSitEntry(addr, -1);
/* add it into dirty seglist */
LocateDirtySegment(segno);
mtx_unlock(&sit_i->sentry_lock);
}
/**
 * This function must be called with curseg_mutex held.
 */
void SegMgr::__AddSumEntry(int type, struct f2fs_summary *sum, unsigned short offset) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct curseg_info *curseg = CURSEG_I(&sbi, type);
char *addr = (char *)curseg->sum_blk;
(addr) += offset * sizeof(struct f2fs_summary);
memcpy(addr, sum, sizeof(struct f2fs_summary));
}
/**
 * Calculate the number of meta pages needed to write the compacted summaries:
 * the NAT and SIT journals plus one entry per valid block of the three data
 * logs must fit in pages of (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) bytes, so the
 * result is 1, 2, or 3.
 */
int SegMgr::NpagesForSummaryFlush() {
f2fs_sb_info &sbi = fs_->SbInfo();
int total_size_bytes = 0;
int valid_sum_count = 0;
int i, sum_space;
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
if (sbi.ckpt->alloc_type[i] == SSR)
valid_sum_count += sbi.blocks_per_seg;
else
valid_sum_count += CursegBlkoff(i);
}
total_size_bytes = valid_sum_count * (SUMMARY_SIZE + 1) + sizeof(struct nat_journal) + 2 +
sizeof(struct sit_journal) + 2;
sum_space = PAGE_CACHE_SIZE - SUM_FOOTER_SIZE;
if (total_size_bytes < sum_space)
return 1;
else if (total_size_bytes < 2 * sum_space)
return 2;
return 3;
}
/**
 * The caller must put (F2fsPutPage) the returned summary page.
 */
Page *SegMgr::GetSumPage(unsigned int segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
return fs_->GetMetaPage(GET_SUM_BLOCK(&sbi, segno));
}
void SegMgr::WriteSumPage(struct f2fs_summary_block *sum_blk, block_t blk_addr) {
Page *page = fs_->GrabMetaPage(blk_addr);
void *kaddr = page_address(page);
memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
#if 0 // porting needed
// set_page_dirty(page);
#endif
FlushDirtyMetaPage(fs_, page);
F2fsPutPage(page, 1);
}
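// Look for a whole prefree section that can be reused for SSR-style
// allocation: every segment in the section must be prefree and must have had
// no valid blocks at the last checkpoint. Returns NULL_SEGNO when reuse is
// unsafe (too few free sections, or NODE logs, which SPOR depends on).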
unsigned int SegMgr::CheckPrefreeSegments(int ofs_unit, int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
unsigned long *prefree_segmap = dirty_i->dirty_segmap[PRE];
unsigned int segno, next_segno, i;
int ofs = 0;
/*
 * If there are not enough free sections,
 * we should not reuse prefree segments.
 */
if (HasNotEnoughFreeSecs())
return NULL_SEGNO;
/*
 * NODE pages should not reuse prefree segments, since that
 * information is needed for SPOR (sudden-power-off recovery).
 */
if (IS_NODESEG(type))
return NULL_SEGNO;
next:
segno = find_next_bit_le(prefree_segmap, TOTAL_SEGS(&sbi), ofs++);
ofs = ((segno / ofs_unit) * ofs_unit) + ofs_unit;
if (segno < TOTAL_SEGS(&sbi)) {
/* skip intermediate segments in a section */
if (segno % ofs_unit)
goto next;
/* skip if whole section is not prefree */
next_segno = find_next_zero_bit(prefree_segmap, TOTAL_SEGS(&sbi), segno + 1);
if (next_segno - segno < static_cast<unsigned int>(ofs_unit))
goto next;
/* skip if whole section was not free at the last checkpoint */
for (i = 0; i < static_cast<unsigned int>(ofs_unit); i++)
if (GetSegEntry(segno + i)->ckpt_valid_blocks)
goto next;
return segno;
}
return NULL_SEGNO;
}
/**
 * Find a new segment in the free segmap, honoring the allocation direction.
 * This function must succeed in finding a free segment; otherwise it is a BUG.
 */
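// Allocation strategy, as implemented below: first try the next free segment
// within the current section; otherwise pick a free section near the hint,
// scanning left or right according to `dir`, and if the chosen zone already
// hosts one of the current segments, move the hint and search again.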
void SegMgr::GetNewSegment(unsigned int *newseg, bool new_sec, int dir) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct free_segmap_info *free_i = FREE_I(&sbi);
unsigned int total_secs = sbi.total_sections;
unsigned int segno, secno, zoneno;
unsigned int total_zones = sbi.total_sections / sbi.secs_per_zone;
unsigned int hint = *newseg / sbi.segs_per_sec;
unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(&sbi, *newseg);
unsigned int left_start = hint;
bool init = true;
int go_left = 0;
int i;
write_lock(&free_i->segmap_lock);
if (!new_sec && ((*newseg + 1) % sbi.segs_per_sec)) {
segno = find_next_zero_bit(free_i->free_segmap, TOTAL_SEGS(&sbi), *newseg + 1);
if (segno < TOTAL_SEGS(&sbi))
goto got_it;
}
find_other_zone:
secno = find_next_zero_bit(free_i->free_secmap, total_secs, hint);
if (secno >= total_secs) {
if (dir == ALLOC_RIGHT) {
secno = find_next_zero_bit(free_i->free_secmap, total_secs, 0);
ZX_ASSERT(!(secno >= total_secs));
} else {
go_left = 1;
left_start = hint - 1;
}
}
if (go_left == 0)
goto skip_left;
while (test_bit(left_start, free_i->free_secmap)) {
if (left_start > 0) {
left_start--;
continue;
}
left_start = find_next_zero_bit(free_i->free_secmap, total_secs, 0);
ZX_ASSERT(!(left_start >= total_secs));
break;
}
secno = left_start;
skip_left:
hint = secno;
segno = secno * sbi.segs_per_sec;
zoneno = secno / sbi.secs_per_zone;
/* give up on finding another zone */
if (!init)
goto got_it;
if (sbi.secs_per_zone == 1)
goto got_it;
if (zoneno == old_zoneno)
goto got_it;
if (dir == ALLOC_LEFT) {
if (!go_left && zoneno + 1 >= total_zones)
goto got_it;
if (go_left && zoneno == 0)
goto got_it;
}
for (i = 0; i < NR_CURSEG_TYPE; i++)
if (CURSEG_I(&sbi, i)->zone == zoneno)
break;
if (i < NR_CURSEG_TYPE) {
/* zone is in use, try another */
if (go_left)
hint = zoneno * sbi.secs_per_zone - 1;
else if (zoneno + 1 >= total_zones)
hint = 0;
else
hint = (zoneno + 1) * sbi.secs_per_zone;
init = false;
goto find_other_zone;
}
got_it:
/* set it as dirty segment in free segmap */
ZX_ASSERT(!test_bit(segno, free_i->free_segmap));
__SetInuse(segno);
*newseg = segno;
write_unlock(&free_i->segmap_lock);
}
void SegMgr::ResetCurseg(int type, int modified) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct curseg_info *curseg = CURSEG_I(&sbi, type);
struct summary_footer *sum_footer;
curseg->segno = curseg->next_segno;
curseg->zone = GET_ZONENO_FROM_SEGNO(&sbi, curseg->segno);
curseg->next_blkoff = 0;
curseg->next_segno = NULL_SEGNO;
sum_footer = &(curseg->sum_blk->footer);
memset(sum_footer, 0, sizeof(struct summary_footer));
if (IS_DATASEG(type))
SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
if (IS_NODESEG(type))
SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
__SetSitEntryType(type, curseg->segno, modified);
}
/**
* Allocate a current working segment.
* This function always allocates a free segment in LFS manner.
*/
void SegMgr::NewCurseg(int type, bool new_sec) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct curseg_info *curseg = CURSEG_I(&sbi, type);
unsigned int segno = curseg->segno;
int dir = ALLOC_LEFT;
WriteSumPage(curseg->sum_blk, GET_SUM_BLOCK(&sbi, curseg->segno));
if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
dir = ALLOC_RIGHT;
if (test_opt(&sbi, NOHEAP))
dir = ALLOC_RIGHT;
GetNewSegment(&segno, new_sec, dir);
curseg->next_segno = segno;
ResetCurseg(type, 1);
curseg->alloc_type = LFS;
}
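// Find the first block offset at or after `start` that is free in both the
// current and the checkpointed validity bitmaps, i.e. safe to allocate for
// SSR without overwriting a block that the last checkpoint still references.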
void SegMgr::__NextFreeBlkoff(struct curseg_info *seg, block_t start) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct seg_entry *se = GetSegEntry(seg->segno);
block_t ofs;
for (ofs = start; ofs < sbi.blocks_per_seg; ofs++) {
if (!f2fs_test_bit(ofs, (char *)se->ckpt_valid_map) &&
!f2fs_test_bit(ofs, (char *)se->cur_valid_map))
break;
}
seg->next_blkoff = ofs;
}
/**
 * If a segment is written in LFS manner, the next block offset is obtained by
 * simply incrementing the current block offset. If it is written in SSR
 * manner, the next block offset must be found with __NextFreeBlkoff().
 */
void SegMgr::__RefreshNextBlkoff(struct curseg_info *seg) {
if (seg->alloc_type == SSR) {
__NextFreeBlkoff(seg, seg->next_blkoff + 1);
} else {
seg->next_blkoff++;
}
}
/**
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it must recover the existing summary information for the
 * segment's valid blocks.
 */
void SegMgr::ChangeCurseg(int type, bool reuse) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
struct curseg_info *curseg = CURSEG_I(&sbi, type);
unsigned int new_segno = curseg->next_segno;
struct f2fs_summary_block *sum_node;
struct Page *sum_page;
WriteSumPage(curseg->sum_blk, GET_SUM_BLOCK(&sbi, curseg->segno));
__SetTestAndInuse(new_segno);
mtx_lock(&dirty_i->seglist_lock);
__RemoveDirtySegment(new_segno, PRE);
__RemoveDirtySegment(new_segno, DIRTY);
mtx_unlock(&dirty_i->seglist_lock);
ResetCurseg(type, 1);
curseg->alloc_type = SSR;
__NextFreeBlkoff(curseg, 0);
if (reuse) {
sum_page = GetSumPage(new_segno);
sum_node = (struct f2fs_summary_block *)page_address(sum_page);
memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
F2fsPutPage(sum_page, 1);
}
}
/*
 * Flush out the current segment and replace it with a new one.
 * This function must succeed; otherwise it is a BUG.
 */
void SegMgr::AllocateSegmentByDefault(int type, bool force) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct curseg_info *curseg = CURSEG_I(&sbi, type);
// unsigned int ofs_unit;
if (force) {
NewCurseg(type, true);
goto out;
}
// TODO: BUG (we can get next_segno from prefree_segment only after checkpoint)
// ofs_unit = NeedSSR() ? 1 : sbi.segs_per_sec;
// curseg->next_segno = CheckPrefreeSegments(ofs_unit, type);
// if (curseg->next_segno != NULL_SEGNO)
// ChangeCurseg(type, false);
// else
if (type == CURSEG_WARM_NODE) {
NewCurseg(type, false);
} else if (false) {
#if 0 // porting needed
// TODO: IMPL (SSR)
//} else if (NeedSSR() && GetSsrSegment(type)) {
#endif
ChangeCurseg(type, true);
} else {
NewCurseg(type, false);
}
out:
sbi.segment_count[curseg->alloc_type]++;
#ifdef F2FS_BU_DEBUG
std::cout << "SegMgr::AllocateSegmentByDefault, type=" << type
<< ", curseg->segno =" << curseg->segno
<< ", FreeSections()=" << FreeSections()
<< ", PrefreeSegments()=" << PrefreeSegments()
<< ", DirtySegments()=" << DirtySegments()
<< ", TOTAL_SEGS=" << TOTAL_SEGS(&sbi)
<< ", Utilization()=" << Utilization()
<< std::endl;
#endif
}
void SegMgr::AllocateNewSegments() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct curseg_info *curseg;
unsigned int old_curseg;
int i;
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
curseg = CURSEG_I(&sbi, i);
old_curseg = curseg->segno;
SIT_I(&sbi)->s_ops->allocate_segment(&sbi, i, true);
LocateDirtySegment(old_curseg);
}
}
#if 0 // porting needed
/*
const struct segment_allocation default_salloc_ops = {
.allocate_segment = AllocateSegmentByDefault,
};
*/
#endif
#if 0 // porting needed (bio)
void SegMgr::F2fsEndIoWrite(struct bio *bio, int err) {
// const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
// struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
// struct bio_private *p = bio->bi_private;
// do {
// struct page *page = bvec->bv_page;
// if (--bvec >= bio->bi_io_vec)
// prefetchw(&bvec->bv_page->flags);
// if (!uptodate) {
// SetPageError(page);
// if (page->mapping)
// set_bit(AS_EIO, &page->mapping->flags);
// p->sbi->ckpt->ckpt_flags |= CP_ERROR_FLAG;
// set_page_dirty(page);
// }
// end_page_writeback(page);
// dec_page_count(p->sbi, F2FS_WRITEBACK);
// } while (bvec >= bio->bi_io_vec);
// if (p->is_sync)
// complete(p->wait);
// kfree(p);
// bio_put(bio);
}
#endif
#if 0 // porting needed (bio)
struct bio *SegMgr::F2fsBioAlloc(struct block_device *bdev, sector_t first_sector, int nr_vecs,
gfp_t gfp_flags) {
// struct bio *bio;
// repeat:
// /* allocate new bio */
// bio = bio_alloc(gfp_flags, nr_vecs);
// if (bio == NULL && (current->flags & PF_MEMALLOC)) {
// while (!bio && (nr_vecs /= 2))
// bio = bio_alloc(gfp_flags, nr_vecs);
// }
// if (bio) {
// bio->bi_bdev = bdev;
// bio->bi_sector = first_sector;
// retry:
// bio->bi_private = kmalloc(sizeof(struct bio_private),
// GFP_NOFS | __GFP_HIGH);
// if (!bio->bi_private) {
// cond_resched();
// goto retry;
// }
// }
// if (bio == NULL) {
// cond_resched();
// goto repeat;
// }
// return bio;
return nullptr;
}
#endif
#if 0 // porting needed (bio)
void SegMgr::DoSubmitBio(enum page_type type, bool sync) {
// int rw = sync ? WRITE_SYNC : WRITE;
// enum page_type btype = type > META ? META : type;
// if (type >= META_FLUSH)
// rw = WRITE_FLUSH_FUA;
// if (sbi->bio[btype]) {
// struct bio_private *p = sbi->bio[btype]->bi_private;
// p->sbi = sbi;
// sbi->bio[btype]->bi_end_io = f2fs_end_io_write;
// if (type == META_FLUSH) {
// DECLARE_COMPLETION_ONSTACK(wait);
// p->is_sync = true;
// p->wait = &wait;
// submit_bio(rw, sbi->bio[btype]);
// wait_for_completion(&wait);
// } else {
// p->is_sync = false;
// submit_bio(rw, sbi->bio[btype]);
// }
// sbi->bio[btype] = NULL;
// }
}
#endif
#if 0 // porting needed (bio)
void SegMgr::F2fsSubmitBio(enum page_type type, bool sync) {
// down_write(&sbi->bio_sem);
// DoSubmitBio(type, sync);
// up_write(&sbi->bio_sem);
}
#endif
void SegMgr::SubmitWritePage(Page *page, block_t blk_addr, enum page_type type) {
zx_status_t ret = fs_->bc_->Writeblk(blk_addr, page->data);
if (ret) {
std::cout << "SubmitWritePage error " << ret << std::endl;
}
#if 0 // porting needed (bio)
// fs_->bc_->Sync();
// struct block_device *bdev = sbi->sb->s_bdev;
// verify_block_addr(sbi, blk_addr);
// down_write(&sbi->bio_sem);
// inc_page_count(sbi, F2FS_WRITEBACK);
// if (sbi->bio[type] && sbi->last_block_in_bio[type] != blk_addr - 1)
// do_submit_bio(sbi, type, false);
// alloc_new:
// if (sbi->bio[type] == NULL)
// sbi->bio[type] = f2fs_bio_alloc(bdev,
// blk_addr << (sbi->log_blocksize - 9),
// bio_get_nr_vecs(bdev), GFP_NOFS | __GFP_HIGH);
// if (bio_add_page(sbi->bio[type], page, PAGE_CACHE_SIZE, 0) <
// PAGE_CACHE_SIZE) {
// do_submit_bio(sbi, type, false);
// goto alloc_new;
// }
// sbi->last_block_in_bio[type] = blk_addr;
// up_write(&sbi->bio_sem);
#endif
}
bool SegMgr::__HasCursegSpace(int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct curseg_info *curseg = CURSEG_I(&sbi, type);
if (curseg->next_blkoff < sbi.blocks_per_seg) {
return true;
}
return false;
}
int SegMgr::__GetSegmentType2(Page *page, enum page_type p_type) {
if (p_type == DATA)
return CURSEG_HOT_DATA;
else
return CURSEG_HOT_NODE;
}
int SegMgr::__GetSegmentType4(Page *page, enum page_type p_type) {
if (p_type == DATA) {
VnodeF2fs *vnode = static_cast<f2fs::VnodeF2fs *>(page->host);
if (S_ISDIR(vnode->i_mode))
return CURSEG_HOT_DATA;
else
return CURSEG_COLD_DATA;
} else {
if (fs_->Nodemgr().IS_DNODE(page) && !NodeMgr::IsColdNode(page))
return CURSEG_HOT_NODE;
else
return CURSEG_COLD_NODE;
}
}
int SegMgr::__GetSegmentType6(Page *page, enum page_type p_type) {
if (p_type == DATA) {
VnodeF2fs *vnode = static_cast<f2fs::VnodeF2fs *>(page->host);
if (S_ISDIR(vnode->i_mode))
return CURSEG_HOT_DATA;
else if (NodeMgr::IsColdData(page) || NodeMgr::IsColdFile(vnode))
return CURSEG_COLD_DATA;
else
return CURSEG_WARM_DATA;
} else {
if (fs_->Nodemgr().IS_DNODE(page))
return NodeMgr::IsColdNode(page) ? CURSEG_WARM_NODE : CURSEG_HOT_NODE;
else
return CURSEG_COLD_NODE;
}
}
int SegMgr::__GetSegmentType(Page *page, enum page_type p_type) {
f2fs_sb_info &sbi = fs_->SbInfo();
switch (sbi.active_logs) {
case 2:
return __GetSegmentType2(page, p_type);
case 4:
return __GetSegmentType4(page, p_type);
case 6:
return __GetSegmentType6(page, p_type);
default:
ZX_ASSERT(0);
}
}
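// Core write path: under curseg_mutex, pick the current segment for the
// page's type and temperature, record its summary entry, advance next_blkoff,
// update SIT for the old and new block addresses, allocate a fresh segment if
// the current one is exhausted, and finally submit the page at *new_blkaddr.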
void SegMgr::DoWritePage(Page *page, block_t old_blkaddr, block_t *new_blkaddr,
struct f2fs_summary *sum, enum page_type p_type) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
struct curseg_info *curseg;
unsigned int old_cursegno;
int type;
type = __GetSegmentType(page, p_type);
curseg = CURSEG_I(&sbi, type);
mtx_lock(&curseg->curseg_mutex);
*new_blkaddr = NEXT_FREE_BLKADDR(&sbi, curseg);
old_cursegno = curseg->segno;
/*
 * __AddSumEntry must be called with curseg_mutex held, because it
 * updates a summary entry in the current summary block.
 */
__AddSumEntry(type, sum, curseg->next_blkoff);
mtx_lock(&sit_i->sentry_lock);
__RefreshNextBlkoff(curseg);
sbi.block_count[curseg->alloc_type]++;
/*
* SIT information should be updated before segment allocation,
* since SSR needs latest valid block information.
*/
RefreshSitEntry(old_blkaddr, *new_blkaddr);
if (!__HasCursegSpace(type)) {
#if 0 // porting needed
// sit_i->s_ops->allocate_segment(&sbi, type, false);
#endif
AllocateSegmentByDefault(type, false);
}
LocateDirtySegment(old_cursegno);
LocateDirtySegment(GET_SEGNO(&sbi, old_blkaddr));
mtx_unlock(&sit_i->sentry_lock);
if (p_type == NODE)
fs_->Nodemgr().FillNodeFooterBlkaddr(page, NEXT_FREE_BLKADDR(&sbi, curseg));
/* writeout dirty page into bdev */
SubmitWritePage(page, *new_blkaddr, p_type);
mtx_unlock(&curseg->curseg_mutex);
}
int SegMgr::WriteMetaPage(Page *page, struct writeback_control *wbc) {
#if 0 // porting needed
// if (wbc && wbc->for_reclaim)
// return AOP_WRITEPAGE_ACTIVATE;
#endif
set_page_writeback(page);
SubmitWritePage(page, page->index, META);
return 0;
}
void SegMgr::WriteNodePage(Page *page, unsigned int nid, block_t old_blkaddr,
block_t *new_blkaddr) {
struct f2fs_summary sum;
SetSummary(&sum, nid, 0, 0);
DoWritePage(page, old_blkaddr, new_blkaddr, &sum, NODE);
}
void SegMgr::WriteDataPage(VnodeF2fs *vnode, Page *page, struct dnode_of_data *dn,
block_t old_blkaddr, block_t *new_blkaddr) {
struct f2fs_summary sum;
struct node_info ni;
ZX_ASSERT(old_blkaddr != NULL_ADDR);
fs_->Nodemgr().GetNodeInfo(dn->nid, &ni);
SetSummary(&sum, dn->nid, dn->ofs_in_node, ni.version);
DoWritePage(page, old_blkaddr, new_blkaddr, &sum, DATA);
}
void SegMgr::RewriteDataPage(Page *page, block_t old_blk_addr) {
SubmitWritePage(page, old_blk_addr, DATA);
}
void SegMgr::RecoverDataPage(Page *page, struct f2fs_summary *sum, block_t old_blkaddr,
block_t new_blkaddr) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
struct curseg_info *curseg;
unsigned int segno, old_cursegno;
struct seg_entry *se;
int type;
segno = GET_SEGNO(&sbi, new_blkaddr);
se = GetSegEntry(segno);
type = se->type;
if (se->valid_blocks == 0 && !IS_CURSEG(&sbi, segno)) {
if (old_blkaddr == NULL_ADDR) {
type = CURSEG_COLD_DATA;
} else {
type = CURSEG_WARM_DATA;
}
}
curseg = CURSEG_I(&sbi, type);
mtx_lock(&curseg->curseg_mutex);
mtx_lock(&sit_i->sentry_lock);
old_cursegno = curseg->segno;
/* change the current segment */
if (segno != curseg->segno) {
curseg->next_segno = segno;
ChangeCurseg(type, true);
}
curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(&sbi, new_blkaddr) & (sbi.blocks_per_seg - 1);
__AddSumEntry(type, sum, curseg->next_blkoff);
RefreshSitEntry(old_blkaddr, new_blkaddr);
LocateDirtySegment(old_cursegno);
LocateDirtySegment(GET_SEGNO(&sbi, old_blkaddr));
mtx_unlock(&sit_i->sentry_lock);
mtx_unlock(&curseg->curseg_mutex);
}
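// Used during recovery: replay a data page write at new_blkaddr. The current
// segment of the matching temperature is temporarily moved onto the target
// segment (SSR-style) so the summary and SIT state can be updated in place.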
void SegMgr::RewriteNodePage(Page *page, struct f2fs_summary *sum, block_t old_blkaddr,
block_t new_blkaddr) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
int type = CURSEG_WARM_NODE;
struct curseg_info *curseg;
unsigned int segno, old_cursegno;
block_t next_blkaddr = NodeMgr::NextBlkaddrOfNode(page);
unsigned int next_segno = GET_SEGNO(&sbi, next_blkaddr);
curseg = CURSEG_I(&sbi, type);
mtx_lock(&curseg->curseg_mutex);
mtx_lock(&sit_i->sentry_lock);
segno = GET_SEGNO(&sbi, new_blkaddr);
old_cursegno = curseg->segno;
/* change the current segment */
if (segno != curseg->segno) {
curseg->next_segno = segno;
ChangeCurseg(type, true);
}
curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(&sbi, new_blkaddr) & (sbi.blocks_per_seg - 1);
__AddSumEntry(type, sum, curseg->next_blkoff);
/* change the current log to the next block addr in advance */
if (next_segno != segno) {
curseg->next_segno = next_segno;
ChangeCurseg(type, true);
}
curseg->next_blkoff = GET_SEGOFF_FROM_SEG0(&sbi, next_blkaddr) & (sbi.blocks_per_seg - 1);
/* rewrite node page */
set_page_writeback(page);
SubmitWritePage(page, new_blkaddr, NODE);
#if 0 // porting needed
F2fsSubmitBio(NODE, true);
#endif
RefreshSitEntry(old_blkaddr, new_blkaddr);
LocateDirtySegment(old_cursegno);
LocateDirtySegment(GET_SEGNO(&sbi, old_blkaddr));
mtx_unlock(&sit_i->sentry_lock);
mtx_unlock(&curseg->curseg_mutex);
}
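// A compacted summary block packs, in order: the NAT journal, the SIT
// journal, and then the summary entries of the three data logs, spilling into
// following meta pages as needed (each page keeps SUM_FOOTER_SIZE bytes in
// reserve). The reader below restores the caches and cursegs in that order.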
int SegMgr::ReadCompactedSummaries() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_checkpoint *ckpt = F2FS_CKPT(&sbi);
struct curseg_info *seg_i;
unsigned char *kaddr;
Page *page;
block_t start;
int i, j, offset;
start = StartSumBlock();
page = fs_->GetMetaPage(start++);
kaddr = static_cast<unsigned char *>(page_address(page));
/* Step 1: restore nat cache */
seg_i = CURSEG_I(&sbi, CURSEG_HOT_DATA);
memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
/* Step 2: restore sit cache */
seg_i = CURSEG_I(&sbi, CURSEG_COLD_DATA);
memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
offset = 2 * SUM_JOURNAL_SIZE;
/* Step 3: restore summary entries */
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
unsigned short blk_off;
unsigned int segno;
seg_i = CURSEG_I(&sbi, i);
segno = le32_to_cpu(ckpt->cur_data_segno[i]);
blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
seg_i->next_segno = segno;
ResetCurseg(i, 0);
seg_i->alloc_type = ckpt->alloc_type[i];
seg_i->next_blkoff = blk_off;
if (seg_i->alloc_type == SSR)
blk_off = sbi.blocks_per_seg;
for (j = 0; j < blk_off; j++) {
struct f2fs_summary *s;
s = reinterpret_cast<struct f2fs_summary *>(kaddr + offset);
seg_i->sum_blk->entries[j] = *s;
offset += SUMMARY_SIZE;
if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE - SUM_FOOTER_SIZE)
continue;
F2fsPutPage(page, 1);
page = nullptr;
page = fs_->GetMetaPage(start++);
kaddr = static_cast<unsigned char *>(page_address(page));
offset = 0;
}
}
F2fsPutPage(page, 1);
return 0;
}
int SegMgr::ReadNormalSummaries(int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_checkpoint *ckpt = F2FS_CKPT(&sbi);
struct f2fs_summary_block *sum;
struct curseg_info *curseg;
Page *new_page;
unsigned short blk_off;
unsigned int segno = 0;
block_t blk_addr = 0;
/* get segment number and block addr */
if (IS_DATASEG(type)) {
segno = le32_to_cpu(ckpt->cur_data_segno[type]);
blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - CURSEG_HOT_DATA]);
if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) {
blk_addr = SumBlkAddr(NR_CURSEG_TYPE, type);
} else
blk_addr = SumBlkAddr(NR_CURSEG_DATA_TYPE, type);
} else {
segno = le32_to_cpu(ckpt->cur_node_segno[type - CURSEG_HOT_NODE]);
blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - CURSEG_HOT_NODE]);
if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) {
blk_addr = SumBlkAddr(NR_CURSEG_NODE_TYPE, type - CURSEG_HOT_NODE);
} else
blk_addr = GET_SUM_BLOCK(&sbi, segno);
}
new_page = fs_->GetMetaPage(blk_addr);
sum = (struct f2fs_summary_block *)page_address(new_page);
if (IS_NODESEG(type)) {
if (ckpt->ckpt_flags & CP_UMOUNT_FLAG) {
struct f2fs_summary *ns = &sum->entries[0];
unsigned int i;
for (i = 0; i < sbi.blocks_per_seg; i++, ns++) {
ns->version = 0;
ns->ofs_in_node = 0;
}
} else {
if (NodeMgr::RestoreNodeSummary(fs_, segno, sum)) {
F2fsPutPage(new_page, 1);
return -EINVAL;
}
}
}
/* set uncompleted segment to curseg */
curseg = CURSEG_I(&sbi, type);
mtx_lock(&curseg->curseg_mutex);
memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
curseg->next_segno = segno;
ResetCurseg(type, 0);
curseg->alloc_type = ckpt->alloc_type[type];
curseg->next_blkoff = blk_off;
mtx_unlock(&curseg->curseg_mutex);
F2fsPutPage(new_page, 1);
return 0;
}
int SegMgr::RestoreCursegSummaries() {
f2fs_sb_info &sbi = fs_->SbInfo();
int type = CURSEG_HOT_DATA;
if (sbi.ckpt->ckpt_flags & CP_COMPACT_SUM_FLAG) {
/* restore for compacted data summary */
if (ReadCompactedSummaries())
return -EINVAL;
type = CURSEG_HOT_NODE;
}
for (; type <= CURSEG_COLD_NODE; type++) {
if (ReadNormalSummaries(type))
return -EINVAL;
}
return 0;
}
void SegMgr::WriteCompactedSummaries(block_t blkaddr) {
f2fs_sb_info &sbi = fs_->SbInfo();
Page *page;
unsigned char *kaddr;
struct f2fs_summary *summary;
struct curseg_info *seg_i;
int written_size = 0;
int i, j;
page = fs_->GrabMetaPage(blkaddr++);
kaddr = (unsigned char *)page_address(page);
/* Step 1: write nat cache */
seg_i = CURSEG_I(&sbi, CURSEG_HOT_DATA);
memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
written_size += SUM_JOURNAL_SIZE;
/* Step 2: write sit cache */
seg_i = CURSEG_I(&sbi, CURSEG_COLD_DATA);
memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits, SUM_JOURNAL_SIZE);
written_size += SUM_JOURNAL_SIZE;
// set_page_dirty(page);
FlushDirtyMetaPage(fs_, page);
/* Step 3: write summary entries */
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
unsigned short blkoff;
seg_i = CURSEG_I(&sbi, i);
if (sbi.ckpt->alloc_type[i] == SSR)
blkoff = sbi.blocks_per_seg;
else
blkoff = CursegBlkoff(i);
for (j = 0; j < blkoff; j++) {
if (!page) {
page = fs_->GrabMetaPage(blkaddr++);
kaddr = (unsigned char *)page_address(page);
written_size = 0;
}
summary = (struct f2fs_summary *)(kaddr + written_size);
*summary = seg_i->sum_blk->entries[j];
written_size += SUMMARY_SIZE;
#if 0 // porting needed
// set_page_dirty(page);
#endif
FlushDirtyMetaPage(fs_, page);
if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE - SUM_FOOTER_SIZE)
continue;
F2fsPutPage(page, 1);
page = nullptr;
}
}
if (page)
F2fsPutPage(page, 1);
}
void SegMgr::WriteNormalSummaries(block_t blkaddr, int type) {
f2fs_sb_info &sbi = fs_->SbInfo();
int i, end;
if (IS_DATASEG(type))
end = type + NR_CURSEG_DATA_TYPE;
else
end = type + NR_CURSEG_NODE_TYPE;
for (i = type; i < end; i++) {
struct curseg_info *sum = CURSEG_I(&sbi, i);
mtx_lock(&sum->curseg_mutex);
WriteSumPage(sum->sum_blk, blkaddr + (i - type));
mtx_unlock(&sum->curseg_mutex);
}
}
void SegMgr::WriteDataSummaries(block_t start_blk) {
f2fs_sb_info &sbi = fs_->SbInfo();
if (sbi.ckpt->ckpt_flags & CP_COMPACT_SUM_FLAG)
WriteCompactedSummaries(start_blk);
else
WriteNormalSummaries(start_blk, CURSEG_HOT_DATA);
}
void SegMgr::WriteNodeSummaries(block_t start_blk) {
f2fs_sb_info &sbi = fs_->SbInfo();
if (sbi.ckpt->ckpt_flags & CP_UMOUNT_FLAG)
WriteNormalSummaries(start_blk, CURSEG_HOT_NODE);
}
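// Look up `val` (a nid for NAT_JOURNAL, a segno for SIT_JOURNAL) in the
// in-summary journal. Returns the entry index if found; if `alloc` is set and
// the journal has room, a new slot is allocated at the end. Returns -1
// otherwise, e.g. when the journal is full.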
int SegMgr::LookupJournalInCursum(struct f2fs_summary_block *sum, int type, unsigned int val,
int alloc) {
int i;
if (type == NAT_JOURNAL) {
for (i = 0; i < nats_in_cursum(sum); i++) {
if (le32_to_cpu(nid_in_journal(sum, i)) == val)
return i;
}
if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
return update_nats_in_cursum(sum, 1);
} else if (type == SIT_JOURNAL) {
for (i = 0; i < sits_in_cursum(sum); i++) {
if (le32_to_cpu(segno_in_journal(sum, i)) == val)
return i;
}
if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
return update_sits_in_cursum(sum, 1);
}
return -1;
}
Page *SegMgr::GetCurrentSitPage(unsigned int segno) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
block_t blk_addr = sit_i->sit_base_addr + offset;
CheckSegRange(segno);
/* calculate sit block address */
if (f2fs_test_bit(offset, sit_i->sit_bitmap))
blk_addr += sit_i->sit_blocks;
return fs_->GetMetaPage(blk_addr);
}
Page *SegMgr::GetNextSitPage(unsigned int start) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
Page *src_page, *dst_page;
pgoff_t src_off, dst_off;
void *src_addr, *dst_addr;
src_off = CurrentSitAddr(start);
dst_off = NextSitAddr(src_off);
/* get current sit block page without lock */
src_page = fs_->GetMetaPage(src_off);
dst_page = fs_->GrabMetaPage(dst_off);
ZX_ASSERT(!PageDirty(src_page));
src_addr = page_address(src_page);
dst_addr = page_address(dst_page);
memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
#if 0 // porting needed
// set_page_dirty(dst_page);
#endif
F2fsPutPage(src_page, 1);
SetToNextSit(sit_i, start);
return dst_page;
}
bool SegMgr::FlushSitsInJournal() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct curseg_info *curseg = CURSEG_I(&sbi, CURSEG_COLD_DATA);
struct f2fs_summary_block *sum = curseg->sum_blk;
int i;
/*
 * If the journal area in the current summary block is full of SIT entries,
 * flush all of them; otherwise there would be no room to replace them with
 * newly dirtied SIT entries.
 */
if (sits_in_cursum(sum) >= SIT_JOURNAL_ENTRIES) {
for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
unsigned int segno;
segno = le32_to_cpu(segno_in_journal(sum, i));
__MarkSitEntryDirty(segno);
}
update_sits_in_cursum(sum, -sits_in_cursum(sum));
return 1;
}
return 0;
}
/**
* CP calls this function, which flushes SIT entries including sit_journal,
* and moves prefree segs to free segs.
*/
void SegMgr::FlushSitEntries() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
struct curseg_info *curseg = CURSEG_I(&sbi, CURSEG_COLD_DATA);
struct f2fs_summary_block *sum = curseg->sum_blk;
unsigned long nsegs = TOTAL_SEGS(&sbi);
Page *page = nullptr;
struct f2fs_sit_block *raw_sit = nullptr;
unsigned int start = 0, end = 0;
unsigned int segno = -1;
bool flushed;
mtx_lock(&curseg->curseg_mutex);
mtx_lock(&sit_i->sentry_lock);
/*
* "flushed" indicates whether sit entries in journal are flushed
* to the SIT area or not.
*/
flushed = FlushSitsInJournal();
while ((segno = find_next_bit_le(bitmap, nsegs, segno + 1)) < nsegs) {
struct seg_entry *se = GetSegEntry(segno);
int sit_offset, offset;
sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
if (flushed)
goto to_sit_page;
offset = LookupJournalInCursum(sum, SIT_JOURNAL, segno, 1);
if (offset >= 0) {
segno_in_journal(sum, offset) = cpu_to_le32(segno);
SegInfoToRawSit(se, &sit_in_journal(sum, offset));
goto flush_done;
}
to_sit_page:
if (!page || (start > segno) || (segno > end)) {
if (page) {
// set_page_dirty(page, fs_);
FlushDirtyMetaPage(fs_, page);
F2fsPutPage(page, 1);
page = nullptr;
}
start = START_SEGNO(sit_i, segno);
end = start + SIT_ENTRY_PER_BLOCK - 1;
/* read sit block that will be updated */
page = GetNextSitPage(start);
raw_sit = (struct f2fs_sit_block *)page_address(page);
}
/* update entry in SIT block */
SegInfoToRawSit(se, &raw_sit->entries[sit_offset]);
flush_done:
__clear_bit(segno, bitmap);
sit_i->dirty_sentries--;
}
mtx_unlock(&sit_i->sentry_lock);
mtx_unlock(&curseg->curseg_mutex);
/* writeout last modified SIT block */
#if 0 // porting needed
// set_page_dirty(page, fs_);
#endif
FlushDirtyMetaPage(fs_, page);
F2fsPutPage(page, 1);
SetPrefreeAsFreeSegments();
}
/*
* Build
*/
int SegMgr::BuildSitInfo() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(&sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(&sbi);
struct sit_info *sit_i;
unsigned int sit_segs, start;
char *src_bitmap, *dst_bitmap;
unsigned int bitmap_size;
/* allocate memory for SIT information */
sit_i = static_cast<sit_info *>(malloc(sizeof(struct sit_info)));
if (!sit_i)
return -ENOMEM;
memset(sit_i, 0, sizeof(struct sit_info));
SM_I(&sbi)->sit_info = sit_i;
sit_i->sentries = static_cast<seg_entry *>(calloc(TOTAL_SEGS(&sbi), sizeof(struct seg_entry)));
if (!sit_i->sentries)
return -ENOMEM;
bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(&sbi));
sit_i->dirty_sentries_bitmap = static_cast<unsigned long *>(malloc(bitmap_size));
if (!sit_i->dirty_sentries_bitmap)
return -ENOMEM;
memset(sit_i->dirty_sentries_bitmap, 0, bitmap_size);
for (start = 0; start < TOTAL_SEGS(&sbi); start++) {
sit_i->sentries[start].cur_valid_map =
static_cast<unsigned char *>(malloc(SIT_VBLOCK_MAP_SIZE));
sit_i->sentries[start].ckpt_valid_map =
static_cast<unsigned char *>(malloc(SIT_VBLOCK_MAP_SIZE));
if (!sit_i->sentries[start].cur_valid_map || !sit_i->sentries[start].ckpt_valid_map)
return -ENOMEM;
memset(sit_i->sentries[start].cur_valid_map, 0, SIT_VBLOCK_MAP_SIZE);
memset(sit_i->sentries[start].ckpt_valid_map, 0, SIT_VBLOCK_MAP_SIZE);
}
if (sbi.segs_per_sec > 1) {
sit_i->sec_entries =
static_cast<sec_entry *>(calloc(sbi.total_sections, sizeof(struct sec_entry)));
if (!sit_i->sec_entries)
return -ENOMEM;
}
/* get information related to SIT */
sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
/* setup SIT bitmap from checkpoint pack */
bitmap_size = __bitmap_size(&sbi, SIT_BITMAP);
src_bitmap = (char *)__bitmap_ptr(&sbi, SIT_BITMAP);
dst_bitmap = static_cast<char *>(malloc(bitmap_size));
if (!dst_bitmap)
return -ENOMEM;
memset(dst_bitmap, 0, bitmap_size);
memcpy(dst_bitmap, src_bitmap, bitmap_size);
#if 0 // porting needed
/* init SIT information */
// sit_i->s_ops = &default_salloc_ops;
#endif
auto cur_time = time(nullptr);
sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
sit_i->sit_blocks = sit_segs << sbi.log_blocks_per_seg;
sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
sit_i->sit_bitmap = dst_bitmap;
sit_i->bitmap_size = bitmap_size;
sit_i->dirty_sentries = 0;
sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
sit_i->elapsed_time = le64_to_cpu(sbi.ckpt->elapsed_time);
sit_i->mounted_time = cur_time;
mtx_init(&sit_i->sentry_lock, mtx_plain);
return 0;
}
int SegMgr::BuildFreeSegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_sm_info *sm_info = SM_I(&sbi);
struct free_segmap_info *free_i;
unsigned int bitmap_size, sec_bitmap_size;
/* allocate memory for free segmap information */
free_i = static_cast<struct free_segmap_info *>(malloc(sizeof(struct free_segmap_info)));
if (!free_i)
return -ENOMEM;
memset(free_i, 0, sizeof(struct free_segmap_info));
SM_I(&sbi)->free_info = free_i;
bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(&sbi));
free_i->free_segmap = static_cast<unsigned long *>(malloc(bitmap_size));
if (!free_i->free_segmap)
return -ENOMEM;
sec_bitmap_size = f2fs_bitmap_size(sbi.total_sections);
free_i->free_secmap = static_cast<unsigned long *>(malloc(sec_bitmap_size));
if (!free_i->free_secmap)
return -ENOMEM;
/* set all segments as dirty temporarily */
memset(free_i->free_segmap, 0xff, bitmap_size);
memset(free_i->free_secmap, 0xff, sec_bitmap_size);
/* init free segmap information */
free_i->start_segno = (unsigned int)GET_SEGNO_FROM_SEG0(&sbi, sm_info->main_blkaddr);
free_i->free_segments = 0;
free_i->free_sections = 0;
rwlock_init(&free_i->segmap_lock);
return 0;
}
int SegMgr::BuildCurseg() {
f2fs_sb_info &sbi = fs_->SbInfo();
curseg_info *array = nullptr;
int i;
array = static_cast<curseg_info *>(calloc(NR_CURSEG_TYPE, sizeof(*array)));
if (!array)
return -ENOMEM;
SM_I(&sbi)->curseg_array = array;
for (i = 0; i < NR_CURSEG_TYPE; i++) {
mtx_init(&array[i].curseg_mutex, mtx_plain);
array[i].sum_blk = static_cast<f2fs_summary_block *>(malloc(PAGE_CACHE_SIZE));
if (!array[i].sum_blk)
return -ENOMEM;
memset(array[i].sum_blk, 0, PAGE_CACHE_SIZE);
array[i].segno = NULL_SEGNO;
array[i].next_blkoff = 0;
}
return RestoreCursegSummaries();
}
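// Populate the in-memory SIT entries at mount time. For each segment, prefer
// the (newer) copy in the cold-data curseg's SIT journal; otherwise read the
// on-disk SIT block. Per-section counters are rebuilt alongside.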
void SegMgr::BuildSitEntries() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
struct curseg_info *curseg = CURSEG_I(&sbi, CURSEG_COLD_DATA);
struct f2fs_summary_block *sum = curseg->sum_blk;
unsigned int start;
for (start = 0; start < TOTAL_SEGS(&sbi); start++) {
struct seg_entry *se = &sit_i->sentries[start];
struct f2fs_sit_block *sit_blk;
struct f2fs_sit_entry sit;
Page *page;
int i;
mtx_lock(&curseg->curseg_mutex);
for (i = 0; i < sits_in_cursum(sum); i++) {
if (le32_to_cpu(segno_in_journal(sum, i)) == start) {
sit = sit_in_journal(sum, i);
mtx_unlock(&curseg->curseg_mutex);
goto got_it;
}
}
mtx_unlock(&curseg->curseg_mutex);
page = GetCurrentSitPage(start);
sit_blk = (struct f2fs_sit_block *)page_address(page);
sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
F2fsPutPage(page, 1);
got_it:
CheckBlockCount(start, &sit);
SegInfoFromRawSit(se, &sit);
if (sbi.segs_per_sec > 1) {
struct sec_entry *e = GetSecEntry(start);
e->valid_blocks += se->valid_blocks;
}
}
}
void SegMgr::InitFreeSegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
unsigned int start;
int type;
for (start = 0; start < TOTAL_SEGS(&sbi); start++) {
struct seg_entry *sentry = GetSegEntry(start);
if (!sentry->valid_blocks)
__SetFree(start);
}
/* mark the current segments as in use */
for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
struct curseg_info *curseg_t = CURSEG_I(&sbi, type);
__SetTestAndInuse(curseg_t->segno);
}
}
void SegMgr::InitDirtySegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
struct free_segmap_info *free_i = FREE_I(&sbi);
unsigned int segno = 0, offset = 0;
unsigned short valid_blocks;
int full_block_cnt = 0, dirty_block_cnt = 0;
while (segno < TOTAL_SEGS(&sbi)) {
/* find dirty segment based on free segmap */
segno = FindNextInuse(free_i, TOTAL_SEGS(&sbi), offset);
if (segno >= TOTAL_SEGS(&sbi))
break;
offset = segno + 1;
valid_blocks = GetValidBlocks(segno, 0);
if (valid_blocks >= sbi.blocks_per_seg || !valid_blocks) {
full_block_cnt++;
continue;
}
mtx_lock(&dirty_i->seglist_lock);
__LocateDirtySegment(segno, DIRTY);
dirty_block_cnt++;
mtx_unlock(&dirty_i->seglist_lock);
}
#ifdef F2FS_BU_DEBUG
std::cout << "SegMgr::InitDirtySegmap, full_block_cnt=" << full_block_cnt
<< ", dirty_block_cnt=" << dirty_block_cnt << std::endl;
#endif
}
int SegMgr::InitVictimSegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(&sbi));
dirty_i->victim_segmap[FG_GC] = static_cast<unsigned long *>(malloc(bitmap_size));
dirty_i->victim_segmap[BG_GC] = static_cast<unsigned long *>(malloc(bitmap_size));
if (!dirty_i->victim_segmap[FG_GC] || !dirty_i->victim_segmap[BG_GC])
return -ENOMEM;
memset(dirty_i->victim_segmap[FG_GC], 0, bitmap_size);
memset(dirty_i->victim_segmap[BG_GC], 0, bitmap_size);
return 0;
}
int SegMgr::BuildDirtySegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct dirty_seglist_info *dirty_i;
unsigned int bitmap_size, i;
dirty_i = static_cast<struct dirty_seglist_info *>(malloc(sizeof(struct dirty_seglist_info)));
if (!dirty_i)
return -ENOMEM;
memset(dirty_i, 0, sizeof(struct dirty_seglist_info));
SM_I(&sbi)->dirty_info = dirty_i;
mtx_init(&dirty_i->seglist_lock, mtx_plain);
bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(&sbi));
for (i = 0; i < NR_DIRTY_TYPE; i++) {
dirty_i->dirty_segmap[i] = static_cast<unsigned long *>(malloc(bitmap_size));
if (!dirty_i->dirty_segmap[i])
return -ENOMEM;
memset(dirty_i->dirty_segmap[i], 0, bitmap_size);
dirty_i->nr_dirty[i] = 0;
}
InitDirtySegmap();
return InitVictimSegmap();
}
/**
* Update min, max modified time for cost-benefit GC algorithm
*/
void SegMgr::InitMinMaxMtime() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
unsigned int segno;
mtx_lock(&sit_i->sentry_lock);
sit_i->min_mtime = LLONG_MAX;
for (segno = 0; segno < TOTAL_SEGS(&sbi); segno += sbi.segs_per_sec) {
unsigned int i;
unsigned long long mtime = 0;
for (i = 0; i < sbi.segs_per_sec; i++)
mtime += GetSegEntry(segno + i)->mtime;
mtime = div_u64(mtime, sbi.segs_per_sec);
if (sit_i->min_mtime > mtime)
sit_i->min_mtime = mtime;
}
sit_i->max_mtime = GetMtime();
mtx_unlock(&sit_i->sentry_lock);
}
zx_status_t SegMgr::BuildSegmentManager() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(&sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(&sbi);
struct f2fs_sm_info *sm_info = nullptr;
int err;
sm_info = new (std::nothrow) f2fs_sm_info;
if (!sm_info)
return ZX_ERR_NO_MEMORY;
/* init sm info */
sbi.sm_info = sm_info;
list_initialize(&sm_info->wblist_head);
spin_lock_init(&sm_info->wblist_lock);
sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
err = BuildSitInfo();
if (err)
return err;
err = BuildFreeSegmap();
if (err)
return err;
err = BuildCurseg();
if (err)
return err;
/* reinit free segmap based on SIT */
BuildSitEntries();
InitFreeSegmap();
err = BuildDirtySegmap();
if (err)
return err;
#ifdef F2FS_BU_DEBUG
std::cout << "SegMgr::BuildSegmentManager(), TOTAL_SEGS(&sbi)=" << TOTAL_SEGS(&sbi)
<< std::endl;
std::cout << "SegMgr::BuildSegmentManager(), ReservedSections()=" << ReservedSections()
<< std::endl;
std::cout << "SegMgr::BuildSegmentManager(), OverprovisionSections()=" << OverprovisionSections()
<< std::endl;
#endif
InitMinMaxMtime();
return ZX_OK;
}
void SegMgr::DiscardDirtySegmap(enum dirty_type dirty_type) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
mtx_lock(&dirty_i->seglist_lock);
free(dirty_i->dirty_segmap[dirty_type]);
dirty_i->nr_dirty[dirty_type] = 0;
mtx_unlock(&dirty_i->seglist_lock);
}
void SegMgr::ResetVictimSegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
unsigned int bitmap_size = f2fs_bitmap_size(TOTAL_SEGS(&sbi));
memset(DIRTY_I(&sbi)->victim_segmap[FG_GC], 0, bitmap_size);
}
void SegMgr::DestroyVictimSegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
free(dirty_i->victim_segmap[FG_GC]);
free(dirty_i->victim_segmap[BG_GC]);
}
void SegMgr::DestroyDirtySegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct dirty_seglist_info *dirty_i = DIRTY_I(&sbi);
int i;
if (!dirty_i)
return;
/* discard pre-free/dirty segments list */
for (i = 0; i < NR_DIRTY_TYPE; i++)
DiscardDirtySegmap((dirty_type)i);
DestroyVictimSegmap();
SM_I(&sbi)->dirty_info = nullptr;
free(dirty_i);
}
// TODO: destroy_curseg
void SegMgr::DestroyCurseg() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct curseg_info *array = SM_I(&sbi)->curseg_array;
int i;
if (!array)
return;
SM_I(&sbi)->curseg_array = nullptr;
for (i = 0; i < NR_CURSEG_TYPE; i++)
free(array[i].sum_blk);
free(array);
}
void SegMgr::DestroyFreeSegmap() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct free_segmap_info *free_i = SM_I(&sbi)->free_info;
if (!free_i)
return;
SM_I(&sbi)->free_info = nullptr;
free(free_i->free_segmap);
free(free_i->free_secmap);
free(free_i);
}
void SegMgr::DestroySitInfo() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct sit_info *sit_i = SIT_I(&sbi);
unsigned int start;
if (!sit_i)
return;
if (sit_i->sentries) {
for (start = 0; start < TOTAL_SEGS(&sbi); start++) {
free(sit_i->sentries[start].cur_valid_map);
free(sit_i->sentries[start].ckpt_valid_map);
}
}
free(sit_i->sentries);
free(sit_i->sec_entries);
free(sit_i->dirty_sentries_bitmap);
SM_I(&sbi)->sit_info = nullptr;
free(sit_i->sit_bitmap);
free(sit_i);
}
void SegMgr::DestroySegmentManager() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_sm_info *sm_info = SM_I(&sbi);
DestroyDirtySegmap();
DestroyCurseg();
DestroyFreeSegmap();
DestroySitInfo();
sbi.sm_info = nullptr;
free(sm_info);
}
} // namespace f2fs