blob: e68c89c6ebdaa14c2ea8ea14dc1da58862fd09c4 [file] [log] [blame]
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef F2FS_SEGMENT_H_
#define F2FS_SEGMENT_H_
#include <cstdint>
#include <limits>

#include "zircon/types.h"
namespace f2fs {
// Sentinel segment number meaning "no segment" (all bits set).
constexpr uint32_t kNullSegNo = std::numeric_limits<uint32_t>::max();
/* during checkpoint, bio_private is used to synchronize the last bio */
struct bio_private {
  bool is_sync = false;  /* whether the submitter synchronously waits for this bio */
  void *wait = nullptr;  /* opaque wait handle signalled on completion — confirm with users */
};
/*
 * Indicates a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of the volume.
 * LEFT means the opposite direction.
 */
enum class AllocDirection { kAllocRight = 0, kAllocLeft, };
/*
 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 */
enum class AllocMode { kLFS = 0, kSSR };
/*
 * In the victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
 * GC_CB is based on the cost-benefit algorithm.
 * GC_GREEDY is based on the greedy algorithm.
 */
enum class GcAlgorithm { kGcCb = 0, kGcGreedy };
/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 */
enum class GcType { kBgGc = 0, kFgGc };
/* for a function parameter to select a victim segment */
struct victim_sel_policy {
  // NOTE(review): alloc_mode/gc_mode hold AllocMode / GcAlgorithm values but are
  // declared as plain int — consider using the enum class types directly.
  int alloc_mode = 0;                /* LFS or SSR (see AllocMode) */
  int gc_mode = 0;                   /* GC_CB or GC_GREEDY (see GcAlgorithm) */
  uint64_t *dirty_segmap = nullptr;  /* dirty segment bitmap */
  uint32_t offset = 0;               /* last scanned bitmap offset */
  uint32_t ofs_unit = 0;             /* bitmap search unit */
  uint32_t min_cost = 0;             /* minimum cost */
  uint32_t min_segno = 0;            /* segment # having min. cost */
};
/* per-segment SIT cache entry (see sit_info::sentries) */
struct seg_entry {
  uint16_t valid_blocks = 0;         /* # of valid blocks */
  uint8_t *cur_valid_map = nullptr;  /* validity bitmap of blocks */
  /*
   * # of valid blocks and the validity bitmap stored in the last
   * checkpoint pack. This information is used by the SSR mode.
   */
  uint16_t ckpt_valid_blocks = 0;
  uint8_t *ckpt_valid_map = nullptr;
  uint8_t type = 0;    /* segment type like CURSEG_XXX_TYPE */
  uint64_t mtime = 0;  /* modification time of the segment */
};
/* per-section SIT cache entry (see sit_info::sec_entries) */
struct sec_entry {
  uint32_t valid_blocks = 0;  /* # of valid blocks in a section */
};
/* pluggable segment-allocation hook; presumably (sbi, type, force) — confirm with callers */
struct segment_allocation {
  void (*allocate_segment)(f2fs_sb_info *, int, bool) = nullptr;
};
/* in-memory state of the SIT (segment info table) */
struct sit_info {
  const segment_allocation *s_ops = nullptr;  /* segment allocation callbacks */
  block_t sit_base_addr = 0;                  /* start block address of SIT area */
  block_t sit_blocks = 0;                     /* # of blocks used by SIT area */
  block_t written_valid_blocks = 0;           /* # of valid blocks in main area */
  char *sit_bitmap = nullptr;                 /* SIT bitmap pointer */
  uint32_t bitmap_size = 0;                   /* SIT bitmap size */
  uint64_t *dirty_sentries_bitmap = nullptr;  /* bitmap for dirty sentries */
  uint32_t dirty_sentries = 0;                /* # of dirty sentries */
  uint32_t sents_per_block = 0;               /* # of SIT entries per block */
  mtx_t sentry_lock;                          /* to protect SIT cache */
  seg_entry *sentries = nullptr;              /* SIT segment-level cache */
  sec_entry *sec_entries = nullptr;           /* SIT section-level cache */
  /* for cost-benefit algorithm in cleaning procedure */
  uint64_t elapsed_time = 0;  /* elapsed time after mount */
  uint64_t mounted_time = 0;  /* mount time */
  uint64_t min_mtime = 0;     /* min. modification time */
  uint64_t max_mtime = 0;     /* max. modification time */
};
/* tracks which segments/sections of the main area are free */
struct free_segmap_info {
  uint32_t start_segno = 0;          /* start segment number logically */
  uint32_t free_segments = 0;        /* # of free segments */
  uint32_t free_sections = 0;        /* # of free sections */
  rwlock_t segmap_lock;              /* free segmap lock */
  uint64_t *free_segmap = nullptr;   /* free segment bitmap */
  uint64_t *free_secmap = nullptr;   /* free section bitmap */
};
/* Notice: The order of dirty type is same with CURSEG_XXX in f2fs.h */
enum class DirtyType {
  kDirtyHotData = 0,  /* dirty segments assigned as hot data logs */
  kDirtyWarmData,     /* dirty segments assigned as warm data logs */
  kDirtyColdData,     /* dirty segments assigned as cold data logs */
  kDirtyHotNode,      /* dirty segments assigned as hot node logs */
  kDirtyWarmNode,     /* dirty segments assigned as warm node logs */
  kDirtyColdNode,     /* dirty segments assigned as cold node logs */
  kDirty,             /* to count # of dirty segments */
  kPre,               /* to count # of entirely obsolete segments */
  kNrDirtytype        /* number of entries; used to size per-type arrays */
};
/* per-dirty-type bookkeeping of dirty segments and GC victims */
struct dirty_seglist_info {
  const struct victim_selection *v_ops = nullptr;  /* victim selection operation */
  uint64_t *dirty_segmap[static_cast<int>(DirtyType::kNrDirtytype)] = {};  /* one bitmap per DirtyType */
  mtx_t seglist_lock;  /* lock for segment bitmaps */
  int nr_dirty[static_cast<int>(DirtyType::kNrDirtytype)] = {};  /* # of dirty segments */
  uint64_t *victim_segmap[2] = {};  /* BG_GC, FG_GC */
};
/* victim selection function for cleaning and SSR */
struct victim_selection {
  // NOTE(review): parameter meanings are not visible here — presumably
  // (sbi, out victim segno, gc type, log type, alloc mode); confirm with callers.
  int (*get_victim)(f2fs_sb_info *, uint32_t *, int, int, char) = nullptr;
};
/* for active log information */
struct curseg_info {
  mtx_t curseg_mutex;                    /* lock for consistency */
  f2fs_summary_block *sum_blk = nullptr; /* cached summary block */
  uint8_t alloc_type = 0;                /* current allocation type */
  uint32_t segno = 0;                    /* current segment number */
  uint16_t next_blkoff = 0;              /* next block offset to write */
  uint32_t zone = 0;                     /* current zone number */
  uint32_t next_segno = 0;               /* preallocated segment */
};
/* V: Logical segment # in volume, R: Relative segment # in main area */
// V -> R: translate a volume-logical segment number into a main-area-relative one.
inline uint32_t GetL2RSegNo(free_segmap_info *free_i, uint32_t segno) {
  return segno - free_i->start_segno;
}
// R -> V: translate a main-area-relative segment number into a volume-logical one.
inline uint32_t GetR2LSegNo(free_segmap_info *free_i, uint32_t segno) {
  return segno + free_i->start_segno;
}
// Nonzero when @t names one of the three data logs.
inline uint32_t IsDataSeg(uint32_t t) {
  return (t == CURSEG_HOT_DATA) || (t == CURSEG_WARM_DATA) || (t == CURSEG_COLD_DATA);
}
// Nonzero when @t names one of the three node logs.
inline uint32_t IsNodeSeg(uint32_t t) {
  return (t == CURSEG_HOT_NODE) || (t == CURSEG_WARM_NODE) || (t == CURSEG_COLD_NODE);
}
// First block address of (relative) segment @segno within the volume.
inline block_t StartBlock(f2fs_sb_info *sbi, uint32_t segno) {
  const uint32_t logical_segno = GetR2LSegNo(FREE_I(sbi), segno);
  return SM_I(sbi)->seg0_blkaddr + (logical_segno << sbi->log_blocks_per_seg);
}
// Address of the next block to be written in the given current log.
inline block_t NextFreeBlkAddr(f2fs_sb_info *sbi, curseg_info *curseg) {
  const block_t seg_start = StartBlock(sbi, curseg->segno);
  return seg_start + curseg->next_blkoff;
}
// First block address of the main area.
inline block_t MainBaseBlock(f2fs_sb_info *sbi) {
  return SM_I(sbi)->main_blkaddr;
}
// Offset of @blk_addr from the first block of segment 0.
inline block_t GetSegOffFromSeg0(f2fs_sb_info *sbi, block_t blk_addr) {
  return blk_addr - SM_I(sbi)->seg0_blkaddr;
}
// Segment number (relative to segment 0) containing @blk_addr.
inline uint32_t GetSegNoFromSeg0(f2fs_sb_info *sbi, block_t blk_addr) {
  const block_t seg0_offset = GetSegOffFromSeg0(sbi, blk_addr);
  return seg0_offset >> sbi->log_blocks_per_seg;
}
// Main-area-relative segment number of @blk_addr, or kNullSegNo for the
// NULL_ADDR / NEW_ADDR sentinels.
inline uint32_t GetSegNo(f2fs_sb_info *sbi, block_t blk_addr) {
  if (blk_addr == NULL_ADDR || blk_addr == NEW_ADDR) {
    return kNullSegNo;
  }
  return GetL2RSegNo(FREE_I(sbi), GetSegNoFromSeg0(sbi, blk_addr));
}
// Section number that contains segment @segno.
inline uint32_t GetSecNo(f2fs_sb_info *sbi, uint32_t segno) {
  return segno / sbi->segs_per_sec;
}
// Zone number that contains segment @segno.
inline uint32_t GetZoneNoFromSegNo(f2fs_sb_info *sbi, uint32_t segno) {
  return GetSecNo(sbi, segno) / sbi->secs_per_zone;
}
// Block address of the summary block for segment @segno in the SSA area.
inline block_t GetSumBlock(f2fs_sb_info *sbi, uint32_t segno) {
  // NOTE(review): sibling helpers reach sm_info via SM_I(); consider using it here too.
  return sbi->sm_info->ssa_blkaddr + segno;
}
// Index of @segno's entry within its SIT block.
inline uint32_t SitEntryOffset(sit_info *sit_i, uint32_t segno) {
  return segno % sit_i->sents_per_block;
}
// Index of the SIT block that holds the entry for @segno.
// (@sit_i is unused here; the parameter is kept for symmetry with SitEntryOffset.)
inline uint32_t SitBlockOffset(sit_info *sit_i, uint32_t segno) {
  return segno / SIT_ENTRY_PER_BLOCK;
}
// First segment number covered by the SIT block that holds @segno.
inline uint32_t StartSegNo(sit_info *sit_i, uint32_t segno) {
  const uint32_t block_index = SitBlockOffset(sit_i, segno);
  return block_index * SIT_ENTRY_PER_BLOCK;
}
// Bytes required for a uint64_t-word bitmap holding @nr bits.
inline uint32_t BitmapSize(uint32_t nr) {
  return BitsToLongs(nr) * sizeof(uint64_t);
}
// Number of segments in the main area.
inline uint32_t TotalSegs(f2fs_sb_info *sbi) {
  return SM_I(sbi)->main_segments;
}
// SegMgr owns segment-level state of the filesystem: building and tearing down
// the SIT caches, the free/dirty segment bitmaps, and the current active logs,
// plus block allocation, summary-block handling, and the victim selection used
// by cleaning. Several pieces are still stubbed out under
// "#if 0 // porting needed" (ported from the Linux f2fs segment code).
class SegMgr {
 public:
  // Not copyable or moveable
  SegMgr(const SegMgr &) = delete;
  SegMgr &operator=(const SegMgr &) = delete;
  SegMgr(SegMgr &&) = delete;
  SegMgr &operator=(SegMgr &&) = delete;

  // TODO: Implement constructor
  // NOTE(review): single-argument constructor — consider marking it explicit.
  SegMgr(F2fs *fs);
  // TODO: Implement destructor
  ~SegMgr() = default;

  // Static functions
  static curseg_info *CURSEG_I(f2fs_sb_info *sbi, int type);
  static int LookupJournalInCursum(f2fs_summary_block *sum, int type, uint32_t val, int alloc);

  // Public functions
  zx_status_t BuildSegmentManager();
  void DestroySegmentManager();
  void RewriteNodePage(Page *page, f2fs_summary *sum, block_t old_blkaddr, block_t new_blkaddr);

 private:
  F2fs *fs_;  // back-pointer to the owning filesystem; lifetime managed by the caller — confirm

 public:
  // Inline functions
  seg_entry *GetSegEntry(uint32_t segno);
  sec_entry *GetSecEntry(uint32_t segno);
  uint32_t GetValidBlocks(uint32_t segno, int section);
  void SegInfoFromRawSit(seg_entry *se, f2fs_sit_entry *rs);
  void SegInfoToRawSit(seg_entry *se, f2fs_sit_entry *rs);
  uint32_t FindNextInuse(free_segmap_info *free_i, uint32_t max, uint32_t segno);
  void __SetFree(uint32_t segno);
  void __SetInuse(uint32_t segno);
  void __SetTestAndFree(uint32_t segno);
  void __SetTestAndInuse(uint32_t segno);
  void GetSitBitmap(void *dst_addr);
#if 0 // porting needed
  block_t WrittenBlockCount();
#endif
  uint32_t FreeSegments();
  int ReservedSegments();
  uint32_t FreeSections();
  uint32_t PrefreeSegments();
  uint32_t DirtySegments();
  int OverprovisionSegments();
  int OverprovisionSections();
  int ReservedSections();
  bool NeedSSR();
  int GetSsrSegment(int type);
  bool HasNotEnoughFreeSecs();
  uint32_t Utilization();
  bool NeedInplaceUpdate(VnodeF2fs *vnode);
  uint32_t CursegSegno(int type);
  uint8_t CursegAllocType(int type);
  uint16_t CursegBlkoff(int type);
  void CheckSegRange(uint32_t segno);
#if 0 // porting needed
  void VerifyBlockAddr(block_t blk_addr);
#endif
  void CheckBlockCount(int segno, f2fs_sit_entry *raw_sit);
  pgoff_t CurrentSitAddr(uint32_t start);
  pgoff_t NextSitAddr(pgoff_t block_addr);
  void SetToNextSit(sit_info *sit_i, uint32_t start);
  uint64_t GetMtime();
  void SetSummary(f2fs_summary *sum, nid_t nid, uint32_t ofs_in_node, uint8_t version);
  block_t StartSumBlock();
  block_t SumBlkAddr(int base, int type);

  // Functions
  int NeedToFlush();
  void F2fsBalanceFs();
  void __LocateDirtySegment(uint32_t segno, enum DirtyType dirty_type);
  void __RemoveDirtySegment(uint32_t segno, enum DirtyType dirty_type);
  void LocateDirtySegment(uint32_t segno);
  void SetPrefreeAsFreeSegments();
  void ClearPrefreeSegments();
  void __MarkSitEntryDirty(uint32_t segno);
  void __SetSitEntryType(int type, uint32_t segno, int modified);
  void UpdateSitEntry(block_t blkaddr, int del);
  void RefreshSitEntry(block_t old_blkaddr, block_t new_blkaddr);
  void InvalidateBlocks(block_t addr);
  void __AddSumEntry(int type, f2fs_summary *sum, uint16_t offset);
  int NpagesForSummaryFlush();
  Page *GetSumPage(uint32_t segno);
  void WriteSumPage(f2fs_summary_block *sum_blk, block_t blk_addr);
  uint32_t CheckPrefreeSegments(int ofs_unit, int type);
  void GetNewSegment(uint32_t *newseg, bool new_sec, int dir);
  void ResetCurseg(int type, int modified);
  void NewCurseg(int type, bool new_sec);
  void __NextFreeBlkoff(curseg_info *seg, block_t start);
  void __RefreshNextBlkoff(curseg_info *seg);
  void ChangeCurseg(int type, bool reuse);
  void AllocateSegmentByDefault(int type, bool force);
  void AllocateNewSegments();
#if 0 // porting needed
  // const struct segment_allocation default_salloc_ops = {
  // .allocate_segment = AllocateSegmentByDefault,
  // };
#endif
#if 0 // porting needed
  void F2fsEndIoWrite(bio *bio, int err);
  bio *F2fsBioAlloc(block_device *bdev, sector_t first_sector, int nr_vecs, gfp_t gfp_flags);
  void DoSubmitBio(enum page_type type, bool sync);
#endif
  void F2fsSubmitBio(enum page_type type, bool sync);
  void SubmitWritePage(Page *page, block_t blk_addr, enum page_type type);
  bool __HasCursegSpace(int type);
  int __GetSegmentType2(Page *page, enum page_type p_type);
  int __GetSegmentType4(Page *page, enum page_type p_type);
  int __GetSegmentType6(Page *page, enum page_type p_type);
  int __GetSegmentType(Page *page, enum page_type p_type);
  void DoWritePage(Page *page, block_t old_blkaddr, block_t *new_blkaddr, f2fs_summary *sum,
                   enum page_type p_type);
  zx_status_t WriteMetaPage(Page *page, WritebackControl *wbc);
  void WriteNodePage(Page *page, uint32_t nid, block_t old_blkaddr, block_t *new_blkaddr);
  void WriteDataPage(VnodeF2fs *vnode, Page *page, dnode_of_data *dn, block_t old_blkaddr,
                     block_t *new_blkaddr);
  void RewriteDataPage(Page *page, block_t old_blk_addr);
  void RecoverDataPage(Page *page, f2fs_summary *sum, block_t old_blkaddr, block_t new_blkaddr);
  int ReadCompactedSummaries();
  int ReadNormalSummaries(int type);
  int RestoreCursegSummaries();
  void WriteCompactedSummaries(block_t blkaddr);
  void WriteNormalSummaries(block_t blkaddr, int type);
  void WriteDataSummaries(block_t start_blk);
  void WriteNodeSummaries(block_t start_blk);
  Page *GetCurrentSitPage(uint32_t segno);
  Page *GetNextSitPage(uint32_t start);
  bool FlushSitsInJournal();
  void FlushSitEntries();

  //////////////////////////////////////////// BUILD
  ///////////////////////////////////////////////////////////
  zx_status_t BuildSitInfo();
  zx_status_t BuildFreeSegmap();
  zx_status_t BuildCurseg();
  void BuildSitEntries();
  void InitFreeSegmap();
  void InitDirtySegmap();
  zx_status_t InitVictimSegmap();
  zx_status_t BuildDirtySegmap();
  void InitMinMaxMtime();
  void DiscardDirtySegmap(enum DirtyType dirty_type);
  void ResetVictimSegmap();
  void DestroyVictimSegmap();
  void DestroyDirtySegmap();
  void DestroyCurseg();
  void DestroyFreeSegmap();
  void DestroySitInfo();
};
// Returns the curseg_info for the current log of the given @type by indexing
// into the superblock-info's curseg array.
// NOTE(review): C-style cast — prefer a named cast once curseg_array's element
// type is confirmed.
inline curseg_info *SegMgr::CURSEG_I(f2fs_sb_info *sbi, int type) {
  return (curseg_info *)(SM_I(sbi)->curseg_array + type);
}
// True when @segno is the active segment of any of the six current logs.
inline bool IsCurSeg(f2fs_sb_info *sbi, uint32_t segno) {
  return ((segno == SegMgr::CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||
          (segno == SegMgr::CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||
          (segno == SegMgr::CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||
          (segno == SegMgr::CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||
          (segno == SegMgr::CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||
          (segno == SegMgr::CURSEG_I(sbi, CURSEG_COLD_NODE)->segno));
}
// True when @secno is the section holding the active segment of any of the
// six current logs.
inline bool IsCurSec(f2fs_sb_info *sbi, uint32_t secno) {
  return (secno == GetSecNo(sbi, SegMgr::CURSEG_I(sbi, CURSEG_HOT_DATA)->segno)) ||
         (secno == GetSecNo(sbi, SegMgr::CURSEG_I(sbi, CURSEG_WARM_DATA)->segno)) ||
         (secno == GetSecNo(sbi, SegMgr::CURSEG_I(sbi, CURSEG_COLD_DATA)->segno)) ||
         (secno == GetSecNo(sbi, SegMgr::CURSEG_I(sbi, CURSEG_HOT_NODE)->segno)) ||
         (secno == GetSecNo(sbi, SegMgr::CURSEG_I(sbi, CURSEG_WARM_NODE)->segno)) ||
         (secno == GetSecNo(sbi, SegMgr::CURSEG_I(sbi, CURSEG_COLD_NODE)->segno));
}
} // namespace f2fs
#endif // F2FS_SEGMENT_H_