[f2fs] code refactoring: node.h, node.cc
Change struct xxx to xxx (node.h)
(comment: #312 https://fuchsia-review.googlesource.com/c/third_party/f2fs/+/520881/2)
Change #define to constexpr (node.h, node.cc)
(comment: #9, #128 https://fuchsia-review.googlesource.com/c/third_party/f2fs/+/511840)
Add default initializers for member variables (node.h)
(comment: #53 https://fuchsia-review.googlesource.com/c/third_party/f2fs/+/511839)
Change enum to enum class (node.h)
(comment: #429 https://fuchsia-review.googlesource.com/c/third_party/f2fs/+/511841)
Change unsigned int style to uint32_t (node.h, node.cc)
(comment: #142 https://fuchsia-review.googlesource.com/c/third_party/f2fs/+/511840)
Change struct xxx to xxx (node.cc)
(comment: #312 https://fuchsia-review.googlesource.com/c/third_party/f2fs/+/520881/2)
Change C style cast to static_cast (node.cc)
(comment: #571 https://fuchsia-review.googlesource.com/c/third_party/f2fs/+/520880)
Change #define to inline function (node.cc)
(comment: #749 https://fuchsia-review.googlesource.com/c/third_party/f2fs/+/511842)
Change error return using zx_status_t (node.h, node.cc)
(comment: #24, #106 https://fuchsia-review.googlesource.com/c/third_party/f2fs/+/520882)
Remove unnecessary [[maybe_unused]] (node.cc)
Remove double-underscore names (node.h, node.cc)
Change-Id: Ie4e47da9914a220fab1481699dc6672db52721bf
Reviewed-on: https://fuchsia-review.googlesource.com/c/third_party/f2fs/+/530804
Reviewed-by: Brett Wilson <brettw@google.com>
diff --git a/node.cc b/node.cc
index 29a0a0b..53952d1 100644
--- a/node.cc
+++ b/node.cc
@@ -13,8 +13,14 @@
namespace f2fs {
-#define __set_nat_cache_dirty(nm_i, ne) list_move_tail(&nm_i->dirty_nat_entries, &ne->list);
-#define __clear_nat_cache_dirty(nm_i, ne) list_move_tail(&nm_i->nat_entries, &ne->list);
+
+inline void SetNatCacheDirty(f2fs_nm_info *nm_i, nat_entry *ne) {
+ list_move_tail(&nm_i->dirty_nat_entries, &ne->list);
+}
+
+inline void ClearNatCacheDirty(f2fs_nm_info *nm_i, nat_entry *ne) {
+ list_move_tail(&nm_i->nat_entries, &ne->list);
+}
inline void NodeMgr::NodeInfoFromRawNat(node_info *ni, f2fs_nat_entry *raw_ne) {
ni->ino = LeToCpu(raw_ne->ino);
@@ -22,15 +28,15 @@
ni->version = raw_ne->version;
}
-inline bool NodeMgr::inc_valid_node_count(struct f2fs_sb_info *sbi, VnodeF2fs *vnode,
- unsigned int count) {
+inline bool NodeMgr::inc_valid_node_count(f2fs_sb_info *sbi, VnodeF2fs *vnode,
+ uint32_t count) {
block_t valid_block_count;
- unsigned int valid_node_count;
+ uint32_t valid_node_count;
SpinLock(&sbi->stat_lock);
- valid_block_count = sbi->total_valid_block_count + (block_t)count;
- sbi->alloc_valid_block_count += (block_t)count;
+ valid_block_count = sbi->total_valid_block_count + static_cast<block_t>(count);
+ sbi->alloc_valid_block_count += static_cast<block_t>(count);
valid_node_count = sbi->total_valid_node_count + count;
if (valid_block_count > sbi->user_block_count) {
@@ -57,13 +63,13 @@
*/
zx_status_t NodeMgr::NextFreeNid(nid_t *nid) {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
- struct free_nid *fnid;
+ f2fs_nm_info *nm_i = NM_I(&sbi);
+ free_nid *fnid;
if (nm_i->fcnt <= 0)
return ZX_ERR_OUT_OF_RANGE;
SpinLock(&nm_i->free_nid_list_lock);
- fnid = containerof(nm_i->free_nid_list.next, struct free_nid, list);
+ fnid = containerof(nm_i->free_nid_list.next, free_nid, list);
*nid = fnid->nid;
SpinUnlock(&nm_i->free_nid_list_lock);
return ZX_OK;
@@ -71,22 +77,22 @@
void NodeMgr::GetNatBitmap(void *addr) {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
+ f2fs_nm_info *nm_i = NM_I(&sbi);
memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
memcpy(nm_i->nat_prev_bitmap, nm_i->nat_bitmap, nm_i->bitmap_size);
}
inline pgoff_t NodeMgr::CurrentNatAddr(nid_t start) {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
+ f2fs_nm_info *nm_i = NM_I(&sbi);
pgoff_t block_off;
pgoff_t block_addr;
int seg_off;
- block_off = NAT_BLOCK_OFFSET(start);
+ block_off = NatBlockOffset(start);
seg_off = block_off >> sbi.log_blocks_per_seg;
- block_addr = (pgoff_t)(nm_i->nat_blkaddr + (seg_off << sbi.log_blocks_per_seg << 1) +
+ block_addr = static_cast<pgoff_t>(nm_i->nat_blkaddr + (seg_off << sbi.log_blocks_per_seg << 1) +
(block_off & ((1 << sbi.log_blocks_per_seg) - 1)));
if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
@@ -97,10 +103,10 @@
inline bool NodeMgr::IsUpdatedNatPage(nid_t start) {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
+ f2fs_nm_info *nm_i = NM_I(&sbi);
pgoff_t block_off;
- block_off = NAT_BLOCK_OFFSET(start);
+ block_off = NatBlockOffset(start);
return (f2fs_test_bit(block_off, nm_i->nat_bitmap) ^
f2fs_test_bit(block_off, nm_i->nat_prev_bitmap));
@@ -108,7 +114,7 @@
inline pgoff_t NodeMgr::NextNatAddr(pgoff_t block_addr) {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
+ f2fs_nm_info *nm_i = NM_I(&sbi);
block_addr -= nm_i->nat_blkaddr;
if ((block_addr >> sbi.log_blocks_per_seg) % 2)
@@ -119,8 +125,8 @@
return block_addr + nm_i->nat_blkaddr;
}
-inline void NodeMgr::SetToNextNat(struct f2fs_nm_info *nm_i, nid_t start_nid) {
- unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);
+inline void NodeMgr::SetToNextNat(f2fs_nm_info *nm_i, nid_t start_nid) {
+ uint32_t block_off = NatBlockOffset(start_nid);
if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
f2fs_clear_bit(block_off, nm_i->nat_bitmap);
@@ -128,10 +134,10 @@
f2fs_set_bit(block_off, nm_i->nat_bitmap);
}
-inline void NodeMgr::FillNodeFooter(Page *page, nid_t nid, nid_t ino, unsigned int ofs,
+inline void NodeMgr::FillNodeFooter(Page *page, nid_t nid, nid_t ino, uint32_t ofs,
bool reset) {
void *kaddr = PageAddress(page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ f2fs_node *rn = static_cast<f2fs_node *>(kaddr);
if (reset)
memset(rn, 0, sizeof(*rn));
rn->footer.nid = CpuToLe(nid);
@@ -142,50 +148,50 @@
void NodeMgr::CopyNodeFooter(Page *dst, Page *src) {
void *src_addr = PageAddress(src);
void *dst_addr = PageAddress(dst);
- struct f2fs_node *src_rn = (struct f2fs_node *)src_addr;
- struct f2fs_node *dst_rn = (struct f2fs_node *)dst_addr;
- memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
+ f2fs_node *src_rn = static_cast<f2fs_node *>(src_addr);
+ f2fs_node *dst_rn = static_cast<f2fs_node *>(dst_addr);
+ memcpy(&dst_rn->footer, &src_rn->footer, sizeof(node_footer));
}
void NodeMgr::FillNodeFooterBlkaddr(Page *page, block_t blkaddr) {
// TODO: IMPL
- // struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
+ // f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_checkpoint *ckpt = F2FS_CKPT(&sbi);
+ f2fs_checkpoint *ckpt = F2FS_CKPT(&sbi);
void *kaddr = PageAddress(page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ f2fs_node *rn = static_cast<f2fs_node *>(kaddr);
rn->footer.cp_ver = ckpt->checkpoint_ver;
rn->footer.next_blkaddr = blkaddr;
}
inline nid_t NodeMgr::InoOfNode(Page *node_page) {
void *kaddr = PageAddress(node_page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ f2fs_node *rn = static_cast<f2fs_node *>(kaddr);
return LeToCpu(rn->footer.ino);
}
inline nid_t NodeMgr::NidOfNode(Page *node_page) {
void *kaddr = PageAddress(node_page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ f2fs_node *rn = static_cast<f2fs_node *>(kaddr);
return LeToCpu(rn->footer.nid);
}
-unsigned int NodeMgr::OfsOfNode(Page *node_page) {
+uint32_t NodeMgr::OfsOfNode(Page *node_page) {
void *kaddr = PageAddress(node_page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
- unsigned flag = LeToCpu(rn->footer.flag);
+ f2fs_node *rn = static_cast<f2fs_node *>(kaddr);
+ uint32_t flag = LeToCpu(rn->footer.flag);
return flag >> OFFSET_BIT_SHIFT;
}
-unsigned long long NodeMgr::CpverOfNode(Page *node_page) {
+uint64_t NodeMgr::CpverOfNode(Page *node_page) {
void *kaddr = PageAddress(node_page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ f2fs_node *rn = static_cast<f2fs_node *>(kaddr);
return LeToCpu(rn->footer.cp_ver);
}
block_t NodeMgr::NextBlkaddrOfNode(Page *node_page) {
void *kaddr = PageAddress(node_page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ f2fs_node *rn = static_cast<f2fs_node *>(kaddr);
return LeToCpu(rn->footer.next_blkaddr);
}
@@ -205,19 +211,19 @@
* `- direct node (x(N + 1))
*/
bool NodeMgr::IS_DNODE(Page *node_page) {
- unsigned int ofs = OfsOfNode(node_page);
+ uint32_t ofs = OfsOfNode(node_page);
if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK || ofs == 5 + 2 * NIDS_PER_BLOCK)
return false;
if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
ofs -= 6 + 2 * NIDS_PER_BLOCK;
- if ((long int)ofs % (NIDS_PER_BLOCK + 1))
+ if (static_cast<int64_t>(ofs) % (NIDS_PER_BLOCK + 1))
return false;
}
return true;
}
inline void NodeMgr::SetNid(Page *p, int off, nid_t nid, bool i) {
- struct f2fs_node *rn = static_cast<struct f2fs_node *>(PageAddress(p));
+ f2fs_node *rn = static_cast<f2fs_node *>(PageAddress(p));
WaitOnPageWriteback(p);
@@ -234,7 +240,7 @@
}
inline nid_t NodeMgr::GetNid(Page *p, int off, bool i) {
- struct f2fs_node *rn = (struct f2fs_node *)PageAddress(p);
+ f2fs_node *rn = static_cast<f2fs_node *>(PageAddress(p));
if (i)
return LeToCpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
return LeToCpu(rn->in.nid[off]);
@@ -269,28 +275,28 @@
int NodeMgr::IsColdNode(Page *page) {
void *kaddr = PageAddress(page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
- unsigned int flag = LeToCpu(rn->footer.flag);
+ f2fs_node *rn = static_cast<f2fs_node *>(kaddr);
+ uint32_t flag = LeToCpu(rn->footer.flag);
return flag & (0x1 << COLD_BIT_SHIFT);
}
-unsigned char NodeMgr::IsFsyncDnode(Page *page) {
+uint8_t NodeMgr::IsFsyncDnode(Page *page) {
void *kaddr = PageAddress(page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
- unsigned int flag = LeToCpu(rn->footer.flag);
+ f2fs_node *rn = static_cast<f2fs_node *>(kaddr);
+ uint32_t flag = LeToCpu(rn->footer.flag);
return flag & (0x1 << FSYNC_BIT_SHIFT);
}
-unsigned char NodeMgr::IsDentDnode(Page *page) {
+uint8_t NodeMgr::IsDentDnode(Page *page) {
void *kaddr = PageAddress(page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
- unsigned int flag = LeToCpu(rn->footer.flag);
+ f2fs_node *rn = static_cast<f2fs_node *>(kaddr);
+ uint32_t flag = LeToCpu(rn->footer.flag);
return flag & (0x1 << DENT_BIT_SHIFT);
}
inline void NodeMgr::SetColdNode(VnodeF2fs *vnode, Page *page) {
- struct f2fs_node *rn = (struct f2fs_node *)PageAddress(page);
- unsigned int flag = LeToCpu(rn->footer.flag);
+ f2fs_node *rn = static_cast<f2fs_node *>(PageAddress(page));
+ uint32_t flag = LeToCpu(rn->footer.flag);
if (S_ISDIR(vnode->i_mode_))
flag &= ~(0x1 << COLD_BIT_SHIFT);
@@ -301,8 +307,8 @@
void NodeMgr::SetFsyncMark(Page *page, int mark) {
void *kaddr = PageAddress(page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
- unsigned int flag = LeToCpu(rn->footer.flag);
+ f2fs_node *rn = static_cast<f2fs_node *>(kaddr);
+ uint32_t flag = LeToCpu(rn->footer.flag);
if (mark)
flag |= (0x1 << FSYNC_BIT_SHIFT);
else
@@ -312,8 +318,8 @@
void NodeMgr::SetDentryMark(Page *page, int mark) {
void *kaddr = PageAddress(page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
- unsigned int flag = LeToCpu(rn->footer.flag);
+ f2fs_node *rn = static_cast<f2fs_node *>(kaddr);
+ uint32_t flag = LeToCpu(rn->footer.flag);
if (mark)
flag |= (0x1 << DENT_BIT_SHIFT);
else
@@ -321,8 +327,8 @@
rn->footer.flag = CpuToLe(flag);
}
-inline void NodeMgr::DecValidNodeCount(struct f2fs_sb_info *sbi, VnodeF2fs *vnode,
- unsigned int count) {
+inline void NodeMgr::DecValidNodeCount(f2fs_sb_info *sbi, VnodeF2fs *vnode,
+ uint32_t count) {
SpinLock(&sbi->stat_lock);
// TODO: IMPL
@@ -346,8 +352,8 @@
void NodeMgr::ClearNodePageDirty(Page *page) {
f2fs_sb_info &sbi = fs_->SbInfo();
#if 0 // porting needed
- // struct address_space *mapping = page->mapping;
- // unsigned int long flags;
+ // address_space *mapping = page->mapping;
+ // uint64_t flags;
#endif
if (PageDirty(page)) {
@@ -378,7 +384,7 @@
void *src_addr;
void *dst_addr;
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
+ f2fs_nm_info *nm_i = NM_I(&sbi);
src_off = CurrentNatAddr(nid);
dst_off = NextNatAddr(src_off);
@@ -412,14 +418,14 @@
* Readahead NAT pages
*/
void NodeMgr::RaNatPages(nid_t nid) {
- // struct address_space *mapping = sbi_->meta_inode->i_mapping;
+ // address_space *mapping = sbi_->meta_inode->i_mapping;
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
- struct Page *page;
+ f2fs_nm_info *nm_i = NM_I(&sbi);
+ Page *page;
pgoff_t index;
int i;
- for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
+ for (i = 0; i < kFreeNidPages; i++, nid += NAT_ENTRY_PER_BLOCK) {
if (nid >= nm_i->max_nid)
nid = 0;
index = CurrentNatAddr(nid);
@@ -438,23 +444,23 @@
}
}
-struct nat_entry *NodeMgr::__LookupNatCache(struct f2fs_nm_info *nm_i, nid_t n) {
+nat_entry *NodeMgr::LookupNatCache(f2fs_nm_info *nm_i, nid_t n) {
// TODO: IMPL
// TODO: need to be modified to use radix tree
list_node_t *cur, *next;
list_for_every_safe(&nm_i->dirty_nat_entries, cur, next) {
- struct nat_entry *e = containerof(cur, nat_entry, list);
+ nat_entry *e = containerof(cur, nat_entry, list);
- if (nat_get_nid(e) == n) {
+ if (NatGetNid(e) == n) {
return e;
}
}
list_for_every_safe(&nm_i->nat_entries, cur, next) {
- struct nat_entry *e = containerof(cur, nat_entry, list);
+ nat_entry *e = containerof(cur, nat_entry, list);
- if (nat_get_nid(e) == n) {
+ if (NatGetNid(e) == n) {
return e;
}
}
@@ -464,16 +470,16 @@
return nullptr;
}
-unsigned int NodeMgr::__GangLookupNatCache(struct f2fs_nm_info *nm_i, nid_t start, unsigned int nr,
- struct nat_entry **ep) {
+uint32_t NodeMgr::GangLookupNatCache(f2fs_nm_info *nm_i, nid_t start, uint32_t nr,
+ nat_entry **ep) {
// TODO: IMPL
// TODO: need to be modified to use radix tree
- unsigned int ret = 0;
+ uint32_t ret = 0;
- for (unsigned int i = 0; i < nr; i++) {
+ for (uint32_t i = 0; i < nr; i++) {
nid_t cur_nid = start + i;
- ep[ret] = __LookupNatCache(nm_i, cur_nid);
+ ep[ret] = LookupNatCache(nm_i, cur_nid);
if (ep[ret]) {
if (++ret == nr) {
break;
@@ -488,9 +494,9 @@
#endif
}
-void NodeMgr::__DelFromNatCache(struct f2fs_nm_info *nm_i, struct nat_entry *e) {
+void NodeMgr::DelFromNatCache(f2fs_nm_info *nm_i, nat_entry *e) {
#if 0 // porting needed
- // radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
+ // radix_tree_delete(&nm_i->nat_root, NatGetNid(e));
#endif
list_delete(&e->list);
nm_i->nat_cnt--;
@@ -502,20 +508,20 @@
int NodeMgr::IsCheckpointedNode(nid_t nid) {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
- struct nat_entry *e;
+ f2fs_nm_info *nm_i = NM_I(&sbi);
+ nat_entry *e;
int is_cp = 1;
ReadLock(&nm_i->nat_tree_lock);
- e = __LookupNatCache(nm_i, nid);
+ e = LookupNatCache(nm_i, nid);
if (e && !e->checkpointed)
is_cp = 0;
ReadUnlock(&nm_i->nat_tree_lock);
return is_cp;
}
-struct nat_entry *NodeMgr::GrabNatEntry(struct f2fs_nm_info *nm_i, nid_t nid) {
- struct nat_entry *new_entry;
+nat_entry *NodeMgr::GrabNatEntry(f2fs_nm_info *nm_i, nid_t nid) {
+ nat_entry *new_entry;
#if 0 // porting needed (kmem_cache_alloc)
// new_entry = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
@@ -529,39 +535,39 @@
// return NULL;
// }
#endif
- memset(new_entry, 0, sizeof(struct nat_entry));
- nat_set_nid(new_entry, nid);
+ memset(new_entry, 0, sizeof(nat_entry));
+ NatSetNid(new_entry, nid);
list_add_tail(&nm_i->nat_entries, &new_entry->list);
nm_i->nat_cnt++;
return new_entry;
}
-void NodeMgr::CacheNatEntry(struct f2fs_nm_info *nm_i, nid_t nid, struct f2fs_nat_entry *ne) {
- struct nat_entry *e;
+void NodeMgr::CacheNatEntry(f2fs_nm_info *nm_i, nid_t nid, f2fs_nat_entry *ne) {
+ nat_entry *e;
retry:
WriteLock(&nm_i->nat_tree_lock);
- e = __LookupNatCache(nm_i, nid);
+ e = LookupNatCache(nm_i, nid);
if (!e) {
e = GrabNatEntry(nm_i, nid);
if (!e) {
WriteUnlock(&nm_i->nat_tree_lock);
goto retry;
}
- nat_set_blkaddr(e, LeToCpu(ne->block_addr));
- nat_set_ino(e, LeToCpu(ne->ino));
- nat_set_version(e, ne->version);
+ NatSetBlkaddr(e, LeToCpu(ne->block_addr));
+ NatSetIno(e, LeToCpu(ne->ino));
+ NatSetVersion(e, ne->version);
e->checkpointed = true;
}
WriteUnlock(&nm_i->nat_tree_lock);
}
-void NodeMgr::SetNodeAddr(struct node_info *ni, block_t new_blkaddr) {
+void NodeMgr::SetNodeAddr(node_info *ni, block_t new_blkaddr) {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
- struct nat_entry *e;
+ f2fs_nm_info *nm_i = NM_I(&sbi);
+ nat_entry *e;
retry:
WriteLock(&nm_i->nat_tree_lock);
- e = __LookupNatCache(nm_i, ni->nid);
+ e = LookupNatCache(nm_i, ni->nid);
if (!e) {
e = GrabNatEntry(nm_i, ni->nid);
if (!e) {
@@ -585,38 +591,38 @@
e->checkpointed = false;
/* sanity check */
- ZX_ASSERT(!(nat_get_blkaddr(e) != ni->blk_addr));
- ZX_ASSERT(!(nat_get_blkaddr(e) == NULL_ADDR && new_blkaddr == NULL_ADDR));
- ZX_ASSERT(!(nat_get_blkaddr(e) == NEW_ADDR && new_blkaddr == NEW_ADDR));
- ZX_ASSERT(!(nat_get_blkaddr(e) != NEW_ADDR && nat_get_blkaddr(e) != NULL_ADDR &&
+ ZX_ASSERT(!(NatGetBlkaddr(e) != ni->blk_addr));
+ ZX_ASSERT(!(NatGetBlkaddr(e) == NULL_ADDR && new_blkaddr == NULL_ADDR));
+ ZX_ASSERT(!(NatGetBlkaddr(e) == NEW_ADDR && new_blkaddr == NEW_ADDR));
+ ZX_ASSERT(!(NatGetBlkaddr(e) != NEW_ADDR && NatGetBlkaddr(e) != NULL_ADDR &&
new_blkaddr == NEW_ADDR));
/* increament version no as node is removed */
- if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
- unsigned char version = nat_get_version(e);
- nat_set_version(e, inc_node_version(version));
+ if (NatGetBlkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
+ uint8_t version = NatGetVersion(e);
+ NatSetVersion(e, IncNodeVersion(version));
}
/* change address */
- nat_set_blkaddr(e, new_blkaddr);
- __set_nat_cache_dirty(nm_i, e);
+ NatSetBlkaddr(e, new_blkaddr);
+ SetNatCacheDirty(nm_i, e);
WriteUnlock(&nm_i->nat_tree_lock);
}
int NodeMgr::TryToFreeNats(int nr_shrink) {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
+ f2fs_nm_info *nm_i = NM_I(&sbi);
- if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
+ if (nm_i->nat_cnt < 2 * kNmWoutThreshold)
return 0;
WriteLock(&nm_i->nat_tree_lock);
while (nr_shrink && !list_is_empty(&nm_i->nat_entries)) {
- struct nat_entry *ne;
+ nat_entry *ne;
// ne = list_first_entry(&nm_i->nat_entries,
- // struct nat_entry, list);
- ne = containerof((&nm_i->nat_entries)->next, struct nat_entry, list);
- __DelFromNatCache(nm_i, ne);
+ // nat_entry, list);
+ ne = containerof((&nm_i->nat_entries)->next, nat_entry, list);
+ DelFromNatCache(nm_i, ne);
nr_shrink--;
}
WriteUnlock(&nm_i->nat_tree_lock);
@@ -626,27 +632,27 @@
/**
* This function returns always success
*/
-void NodeMgr::GetNodeInfo(nid_t nid, struct node_info *ni) {
+void NodeMgr::GetNodeInfo(nid_t nid, node_info *ni) {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
- struct curseg_info *curseg = SegMgr::CURSEG_I(&sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
- nid_t start_nid = START_NID(nid);
- struct f2fs_nat_block *nat_blk;
- struct Page *page = NULL;
- struct f2fs_nat_entry ne;
- struct nat_entry *e;
+ f2fs_nm_info *nm_i = NM_I(&sbi);
+ curseg_info *curseg = SegMgr::CURSEG_I(&sbi, CURSEG_HOT_DATA);
+ f2fs_summary_block *sum = curseg->sum_blk;
+ nid_t start_nid = StartNid(nid);
+ f2fs_nat_block *nat_blk;
+ Page *page = NULL;
+ f2fs_nat_entry ne;
+ nat_entry *e;
int i;
ni->nid = nid;
/* Check nat cache */
ReadLock(&nm_i->nat_tree_lock);
- e = __LookupNatCache(nm_i, nid);
+ e = LookupNatCache(nm_i, nid);
if (e) {
- ni->ino = nat_get_ino(e);
- ni->blk_addr = nat_get_blkaddr(e);
- ni->version = nat_get_version(e);
+ ni->ino = NatGetIno(e);
+ ni->blk_addr = NatGetBlkaddr(e);
+ ni->version = NatGetVersion(e);
}
ReadUnlock(&nm_i->nat_tree_lock);
if (e)
@@ -665,7 +671,7 @@
/* Fill node_info from nat page */
page = GetCurrentNatPage(start_nid);
- nat_blk = (struct f2fs_nat_block *)PageAddress(page);
+ nat_blk = static_cast<f2fs_nat_block *>(PageAddress(page));
ne = nat_blk->entries[nid - start_nid];
NodeInfoFromRawNat(ni, &ne);
@@ -680,7 +686,7 @@
* The maximum depth is four.
* Offset[0] will have raw inode offset.
*/
-int NodeMgr::GetNodePath(long block, int offset[4], unsigned int noffset[4]) {
+int NodeMgr::GetNodePath(long block, int offset[4], uint32_t noffset[4]) {
const long direct_index = ADDRS_PER_INODE;
const long direct_blks = ADDRS_PER_BLOCK;
const long dptrs_per_blk = NIDS_PER_BLOCK;
@@ -753,12 +759,12 @@
/*
* Caller should call f2fs_put_dnode(dn).
*/
-zx_status_t NodeMgr::GetDnodeOfData(struct dnode_of_data *dn, pgoff_t index, int ro) {
+zx_status_t NodeMgr::GetDnodeOfData(dnode_of_data *dn, pgoff_t index, int ro) {
f2fs_sb_info &sbi = fs_->SbInfo();
Page *npage[4];
Page *parent;
int offset[4];
- unsigned int noffset[4];
+ uint32_t noffset[4];
nid_t nids[4];
int level, i;
zx_status_t err = 0;
@@ -786,7 +792,7 @@
/* alloc new node */
if (!AllocNid(&(nids[i]))) {
mutex_unlock_op(&sbi, NODE_NEW);
- err = -ENOSPC;
+ err = ZX_ERR_NO_SPACE;
goto release_pages;
}
@@ -847,7 +853,7 @@
<< ", dn->data_blkaddr=" << dn->data_blkaddr
<< std::endl;
#endif
- return 0;
+ return ZX_OK;
release_pages:
F2fsPutPage(parent, 1);
@@ -859,9 +865,9 @@
return err;
}
-void NodeMgr::TruncateNode(struct dnode_of_data *dn) {
+void NodeMgr::TruncateNode(dnode_of_data *dn) {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct node_info ni;
+ node_info ni;
GetNodeInfo(dn->nid, &ni);
ZX_ASSERT(ni.blk_addr != NULL_ADDR);
@@ -887,7 +893,7 @@
dn->node_page = nullptr;
}
-int NodeMgr::TruncateDnode(struct dnode_of_data *dn) {
+zx_status_t NodeMgr::TruncateDnode(dnode_of_data *dn) {
Page *page = nullptr;
zx_status_t err = 0;
@@ -909,12 +915,12 @@
return 1;
}
-int NodeMgr::TruncateNodes(struct dnode_of_data *dn, unsigned int nofs, int ofs, int depth) {
- struct dnode_of_data rdn = *dn;
+zx_status_t NodeMgr::TruncateNodes(dnode_of_data *dn, uint32_t nofs, int ofs, int depth) {
+ dnode_of_data rdn = *dn;
Page *page = nullptr;
- struct f2fs_node *rn;
+ f2fs_node *rn;
nid_t child_nid;
- unsigned int child_nofs;
+ uint32_t child_nofs;
int freed = 0;
int i, ret;
zx_status_t err = 0;
@@ -926,7 +932,7 @@
if (err)
return err;
- rn = (struct f2fs_node *)PageAddress(page);
+ rn = static_cast<f2fs_node *>(PageAddress(page));
if (depth < 3) {
for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
child_nid = LeToCpu(rn->in.nid[i]);
@@ -951,7 +957,7 @@
if (ret == (NIDS_PER_BLOCK + 1)) {
SetNid(page, i, 0, false);
child_nofs += ret;
- } else if (ret < 0 && ret != -ENOENT) {
+ } else if (ret < 0 && ret != ZX_ERR_NOT_FOUND) {
goto out_err;
}
}
@@ -973,7 +979,7 @@
return ret;
}
-int NodeMgr::TruncatePartialNodes(struct dnode_of_data *dn, struct f2fs_inode *ri, int *offset,
+zx_status_t NodeMgr::TruncatePartialNodes(dnode_of_data *dn, f2fs_inode *ri, int *offset,
int depth) {
Page *pages[2];
nid_t nid[3];
@@ -984,7 +990,7 @@
nid[0] = LeToCpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
if (!nid[0])
- return 0;
+ return ZX_OK;
/* get indirect nodes in the path */
for (i = 0; i < depth - 1; i++) {
@@ -1028,16 +1034,16 @@
/**
* All the block addresses of data and nodes should be nullified.
*/
-int NodeMgr::TruncateInodeBlocks(VnodeF2fs *vnode, pgoff_t from) {
+zx_status_t NodeMgr::TruncateInodeBlocks(VnodeF2fs *vnode, pgoff_t from) {
int cont = 1;
int level, offset[4], noffset[4];
- unsigned int nofs;
- struct f2fs_node *rn;
- struct dnode_of_data dn;
+ uint32_t nofs;
+ f2fs_node *rn;
+ dnode_of_data dn;
Page *page = nullptr;
zx_status_t err = 0;
- level = GetNodePath(from, offset, (unsigned int *)noffset);
+ level = GetNodePath(from, offset, reinterpret_cast<uint32_t *>(noffset));
err = GetNodePage(vnode->Ino(), &page);
if (err)
@@ -1048,7 +1054,7 @@
// unlock_page(page);
#endif
- rn = (f2fs_node *)PageAddress(page);
+ rn = static_cast<f2fs_node *>(PageAddress(page));
switch (level) {
case 0:
case 1:
@@ -1059,7 +1065,7 @@
if (!offset[level - 1])
goto skip_partial;
err = TruncatePartialNodes(&dn, &rn->i, offset, level);
- if (err < 0 && err != -ENOENT)
+ if (err < 0 && err != ZX_ERR_NOT_FOUND)
goto fail;
nofs += 1 + NIDS_PER_BLOCK;
break;
@@ -1068,7 +1074,7 @@
if (!offset[level - 1])
goto skip_partial;
err = TruncatePartialNodes(&dn, &rn->i, offset, level);
- if (err < 0 && err != -ENOENT)
+ if (err < 0 && err != ZX_ERR_NOT_FOUND)
goto fail;
break;
default:
@@ -1097,7 +1103,7 @@
default:
ZX_ASSERT(0);
}
- if (err < 0 && err != -ENOENT)
+ if (err < 0 && err != ZX_ERR_NOT_FOUND)
goto fail;
if (offset[1] == 0 && rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
#if 0 // porting needed
@@ -1122,11 +1128,11 @@
return err > 0 ? 0 : err;
}
-int NodeMgr::RemoveInodePage(VnodeF2fs *vnode) {
+zx_status_t NodeMgr::RemoveInodePage(VnodeF2fs *vnode) {
f2fs_sb_info &sbi = fs_->SbInfo();
Page *page = nullptr;
nid_t ino = vnode->Ino();
- struct dnode_of_data dn;
+ dnode_of_data dn;
zx_status_t err = 0;
mutex_lock_op(&sbi, NODE_TRUNC);
@@ -1158,7 +1164,7 @@
SetNewDnode(&dn, vnode, page, page, ino);
TruncateNode(&dn);
} else if (vnode->i_blocks_ == 0) {
- struct node_info ni;
+ node_info ni;
GetNodeInfo(vnode->Ino(), &ni);
/* called after f2fs_new_inode() is failed */
@@ -1168,13 +1174,13 @@
ZX_ASSERT(0);
}
mutex_unlock_op(&sbi, NODE_TRUNC);
- return 0;
+ return ZX_OK;
}
zx_status_t NodeMgr::NewInodePage(Dir *parent, VnodeF2fs *child) {
f2fs_sb_info &sbi = fs_->SbInfo();
Page *page = nullptr;
- struct dnode_of_data dn;
+ dnode_of_data dn;
zx_status_t err = 0;
/* allocate inode page for new inode */
@@ -1189,10 +1195,10 @@
return ZX_OK;
}
-zx_status_t NodeMgr::NewNodePage(struct dnode_of_data *dn, unsigned int ofs, Page **out) {
+zx_status_t NodeMgr::NewNodePage(dnode_of_data *dn, uint32_t ofs, Page **out) {
f2fs_sb_info &sbi = fs_->SbInfo();
- //[[maybe_unused]] struct address_space *mapping = sbi.node_inode->i_mapping;
- struct node_info old_ni, new_ni;
+ //[[maybe_unused]] address_space *mapping = sbi.node_inode->i_mapping;
+ node_info old_ni, new_ni;
Page *page = nullptr;
int err;
@@ -1238,7 +1244,7 @@
return err;
}
-zx_status_t NodeMgr::ReadNodePage(Page *page, unsigned long nid, int type) {
+zx_status_t NodeMgr::ReadNodePage(Page *page, nid_t nid, int type) {
node_info ni;
GetNodeInfo(nid, &ni);
@@ -1270,7 +1276,7 @@
Page *page = nullptr;
f2fs_sb_info &sbi = fs_->SbInfo();
#if 0 // porting needed
- // struct address_space *mapping = sbi_->node_inode->i_mapping;
+ // address_space *mapping = sbi_->node_inode->i_mapping;
#endif
page = GrabCachePage(nullptr, F2FS_NODE_INO(&sbi), nid);
@@ -1293,14 +1299,14 @@
/**
* Return a locked page for the desired node page.
- * And, readahead MAX_RA_NODE number of node pages.
+ * And, readahead kMaxRaNode number of node pages.
*/
Page *NodeMgr::GetNodePageRa(Page *parent, int start) {
// TODO: IMPL Read ahead
return nullptr;
}
-void NodeMgr::SyncInodePage(struct dnode_of_data *dn) {
+void NodeMgr::SyncInodePage(dnode_of_data *dn) {
if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
dn->vnode->UpdateInode(dn->node_page);
} else if (dn->inode_page) {
@@ -1318,13 +1324,13 @@
}
}
-int NodeMgr::SyncNodePages(nid_t ino, struct WritebackControl *wbc) {
+int NodeMgr::SyncNodePages(nid_t ino, WritebackControl *wbc) {
#if 0 // porting needed
// f2fs_sb_info &sbi = fs_->SbInfo();
- // //struct address_space *mapping = sbi.node_inode->i_mapping;
+ // //address_space *mapping = sbi.node_inode->i_mapping;
// pgoff_t index, end;
// // TODO: IMPL
- // //struct pagevec pvec;
+ // //pagevec pvec;
// int step = ino ? 2 : 0;
// int nwritten = 0, wrote = 0;
@@ -1345,7 +1351,7 @@
// break;
// for (i = 0; i < nr_pages; i++) {
- // struct page *page = pvec.pages[i];
+ // page *page = pvec.pages[i];
// /*
// * flushing sequence with step:
@@ -1426,12 +1432,12 @@
return 0;
}
-zx_status_t NodeMgr::F2fsWriteNodePage(Page *page, struct WritebackControl *wbc) {
+zx_status_t NodeMgr::F2fsWriteNodePage(Page *page, WritebackControl *wbc) {
f2fs_sb_info &sbi = fs_->SbInfo();
nid_t nid;
- unsigned int nofs;
+ uint32_t nofs;
block_t new_addr;
- struct node_info ni;
+ node_info ni;
#if 0 // porting needed
// if (wbc->for_reclaim) {
@@ -1473,7 +1479,7 @@
}
#if 0 // porting needed
-int NodeMgr::F2fsWriteNodePages(struct address_space *mapping, struct WritebackControl *wbc) {
+int NodeMgr::F2fsWriteNodePages(struct address_space *mapping, WritebackControl *wbc) {
// struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
// struct block_device *bdev = sbi->sb->s_bdev;
// long nr_to_write = wbc->nr_to_write;
@@ -1516,7 +1522,7 @@
#endif
#if 0 // porting needed
-void NodeMgr::F2fsInvalidateNodePage(Page *page, unsigned long offset) {
+void NodeMgr::F2fsInvalidateNodePage(Page *page, uint64_t offset) {
f2fs_sb_info &sbi = fs_->SbInfo();
if (PageDirty(page))
@@ -1532,11 +1538,11 @@
}
#endif
-struct free_nid *NodeMgr::__LookupFreeNidList(nid_t n, list_node_t *head) {
+free_nid *NodeMgr::LookupFreeNidList(nid_t n, list_node_t *head) {
list_node_t *this_list;
- struct free_nid *i = nullptr;
+ free_nid *i = nullptr;
list_for_every(head, this_list) {
- i = containerof(this_list, struct free_nid, list);
+ i = containerof(this_list, free_nid, list);
if (i->nid == n)
break;
i = nullptr;
@@ -1544,7 +1550,7 @@
return i;
}
-void NodeMgr::__DelFromFreeNidList(struct free_nid *i) {
+void NodeMgr::DelFromFreeNidList(free_nid *i) {
list_delete(&i->list);
#if 0 // porting needed
// kmem_cache_free(free_nid_slab, i);
@@ -1552,10 +1558,10 @@
delete i;
}
-int NodeMgr::AddFreeNid(struct f2fs_nm_info *nm_i, nid_t nid) {
- struct free_nid *i;
+int NodeMgr::AddFreeNid(f2fs_nm_info *nm_i, nid_t nid) {
+ free_nid *i;
- if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
+ if (nm_i->fcnt > 2 * kMaxFreeNids)
return 0;
retry:
#if 0 // porting needed (kmem_cache_alloc)
@@ -1569,10 +1575,10 @@
goto retry;
}
i->nid = nid;
- i->state = NID_NEW;
+ i->state = static_cast<int>(NidState::kNidNew);
SpinLock(&nm_i->free_nid_list_lock);
- if (__LookupFreeNidList(nid, &nm_i->free_nid_list)) {
+ if (LookupFreeNidList(nid, &nm_i->free_nid_list)) {
SpinUnlock(&nm_i->free_nid_list_lock);
#if 0 // porting needed
// kmem_cache_free(free_nid_slab, i);
@@ -1586,22 +1592,22 @@
return 1;
}
-void NodeMgr::RemoveFreeNid(struct f2fs_nm_info *nm_i, nid_t nid) {
- struct free_nid *i;
+void NodeMgr::RemoveFreeNid(f2fs_nm_info *nm_i, nid_t nid) {
+ free_nid *i;
SpinLock(&nm_i->free_nid_list_lock);
- i = __LookupFreeNidList(nid, &nm_i->free_nid_list);
- if (i && i->state == NID_NEW) {
- __DelFromFreeNidList(i);
+ i = LookupFreeNidList(nid, &nm_i->free_nid_list);
+ if (i && i->state == static_cast<int>(NidState::kNidNew)) {
+ DelFromFreeNidList(i);
nm_i->fcnt--;
}
SpinUnlock(&nm_i->free_nid_list_lock);
}
-int NodeMgr::ScanNatPage(struct f2fs_nm_info *nm_i, Page *nat_page, nid_t start_nid) {
- struct f2fs_nat_block *nat_blk = static_cast<f2fs_nat_block *>(PageAddress(nat_page));
+int NodeMgr::ScanNatPage(f2fs_nm_info *nm_i, Page *nat_page, nid_t start_nid) {
+ f2fs_nat_block *nat_blk = static_cast<f2fs_nat_block *>(PageAddress(nat_page));
block_t blk_addr;
int fcnt = 0;
- unsigned int i;
+ uint32_t i;
/* 0 nid should not be used */
if (start_nid == 0)
@@ -1620,13 +1626,13 @@
void NodeMgr::BuildFreeNids() {
f2fs_sb_info &sbi = fs_->SbInfo();
- [[maybe_unused]] struct free_nid *fnid, *next_fnid;
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
- struct curseg_info *curseg = SegMgr::CURSEG_I(&sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ free_nid *fnid, *next_fnid;
+ f2fs_nm_info *nm_i = NM_I(&sbi);
+ curseg_info *curseg = SegMgr::CURSEG_I(&sbi, CURSEG_HOT_DATA);
+ f2fs_summary_block *sum = curseg->sum_blk;
nid_t nid = 0;
bool is_cycled = false;
- unsigned long fcnt = 0;
+ uint64_t fcnt = 0;
int i;
nid = nm_i->next_scan_nid;
@@ -1646,7 +1652,7 @@
nid = 0;
is_cycled = true;
}
- if (fcnt > MAX_FREE_NIDS)
+ if (fcnt > kMaxFreeNids)
break;
if (is_cycled && nm_i->init_scan_nid <= nid)
break;
@@ -1669,11 +1675,11 @@
/* remove the free nids from current allocated nids */
list_for_every_entry_safe (&nm_i->free_nid_list, fnid, next_fnid, free_nid, list) {
- struct nat_entry *ne;
+ nat_entry *ne;
ReadLock(&nm_i->nat_tree_lock);
- ne = __LookupNatCache(nm_i, fnid->nid);
- if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
+ ne = LookupNatCache(nm_i, fnid->nid);
+ if (ne && NatGetBlkaddr(ne) != NULL_ADDR)
RemoveFreeNid(nm_i, fnid->nid);
ReadUnlock(&nm_i->nat_tree_lock);
}
@@ -1686,8 +1692,8 @@
*/
bool NodeMgr::AllocNid(nid_t *nid) {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
- struct free_nid *i = nullptr;
+ f2fs_nm_info *nm_i = NM_I(&sbi);
+ free_nid *i = nullptr;
list_node_t *this_list;
retry:
mtx_lock(&nm_i->build_lock);
@@ -1715,14 +1721,14 @@
ZX_ASSERT(!list_is_empty(&nm_i->free_nid_list));
list_for_every(&nm_i->free_nid_list, this_list) {
- i = containerof(this_list, struct free_nid, list);
- if (i->state == NID_NEW)
+ i = containerof(this_list, free_nid, list);
+ if (i->state == static_cast<int>(NidState::kNidNew))
break;
}
- ZX_ASSERT(i->state == NID_NEW);
+ ZX_ASSERT(i->state == static_cast<int>(NidState::kNidNew));
*nid = i->nid;
- i->state = NID_ALLOC;
+ i->state = static_cast<int>(NidState::kNidAlloc);
nm_i->fcnt--;
SpinUnlock(&nm_i->free_nid_list_lock);
return true;
@@ -1733,14 +1739,14 @@
*/
void NodeMgr::AllocNidDone(nid_t nid) {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
- struct free_nid *i;
+ f2fs_nm_info *nm_i = NM_I(&sbi);
+ free_nid *i;
SpinLock(&nm_i->free_nid_list_lock);
- i = __LookupFreeNidList(nid, &nm_i->free_nid_list);
+ i = LookupFreeNidList(nid, &nm_i->free_nid_list);
if (i) {
- ZX_ASSERT(i->state == NID_ALLOC);
- __DelFromFreeNidList(i);
+ ZX_ASSERT(i->state == static_cast<int>(NidState::kNidAlloc));
+ DelFromFreeNidList(i);
}
SpinUnlock(&nm_i->free_nid_list_lock);
}
@@ -1754,7 +1760,7 @@
AddFreeNid(NM_I(&sbi), nid);
}
-void NodeMgr::RecoverNodePage(Page *page, struct f2fs_summary *sum, struct node_info *ni,
+void NodeMgr::RecoverNodePage(Page *page, f2fs_summary *sum, node_info *ni,
block_t new_blkaddr) {
fs_->Segmgr().RewriteNodePage(page, sum, ni->blk_addr, new_blkaddr);
SetNodeAddr(ni, new_blkaddr);
@@ -1763,10 +1769,10 @@
zx_status_t NodeMgr::RecoverInodePage(Page *page) {
f2fs_sb_info &sbi = fs_->SbInfo();
- //[[maybe_unused]] struct address_space *mapping = sbi.node_inode->i_mapping;
- struct f2fs_node *src, *dst;
+ //[[maybe_unused]] address_space *mapping = sbi.node_inode->i_mapping;
+ f2fs_node *src, *dst;
nid_t ino = InoOfNode(page);
- struct node_info old_ni, new_ni;
+ node_info old_ni, new_ni;
Page *ipage = nullptr;
ipage = GrabCachePage(nullptr, F2FS_NODE_INO(&sbi), ino);
@@ -1783,10 +1789,11 @@
#endif
FillNodeFooter(ipage, ino, ino, 0, true);
- src = (struct f2fs_node *)PageAddress(page);
- dst = (struct f2fs_node *)PageAddress(ipage);
+ src = static_cast<f2fs_node *>(PageAddress(page));
+ dst = static_cast<f2fs_node *>(PageAddress(ipage));
- memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
+ memcpy(dst, src, reinterpret_cast<uint64_t>(&src->i.i_ext)
+ - reinterpret_cast<uint64_t>(&src->i));
dst->i.i_size = 0;
dst->i.i_blocks = 1;
dst->i.i_links = 1;
@@ -1802,11 +1809,11 @@
return ZX_OK;
}
-int NodeMgr::RestoreNodeSummary(F2fs *fs, unsigned int segno, struct f2fs_summary_block *sum) {
+zx_status_t NodeMgr::RestoreNodeSummary(F2fs *fs, uint32_t segno, f2fs_summary_block *sum) {
f2fs_sb_info &sbi = fs->SbInfo();
- struct f2fs_node *rn;
- struct f2fs_summary *sum_entry;
- struct Page *page = nullptr;
+ f2fs_node *rn;
+ f2fs_summary *sum_entry;
+ Page *page = nullptr;
block_t addr;
int i, last_offset;
@@ -1830,7 +1837,7 @@
if (VnodeF2fs::Readpage(fs, page, addr, kReadSync))
goto out;
- rn = (struct f2fs_node *)PageAddress(page);
+ rn = static_cast<f2fs_node *>(PageAddress(page));
sum_entry->nid = rn->footer.nid;
sum_entry->version = 0;
sum_entry->ofs_in_node = 0;
@@ -1850,14 +1857,14 @@
//__free_pages(page, 0);
#endif
F2fsPutPage(page, 1);
- return 0;
+ return ZX_OK;
}
bool NodeMgr::FlushNatsInJournal() {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
- struct curseg_info *curseg = fs_->Segmgr().CURSEG_I(&sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ f2fs_nm_info *nm_i = NM_I(&sbi);
+ curseg_info *curseg = fs_->Segmgr().CURSEG_I(&sbi, CURSEG_HOT_DATA);
+ f2fs_summary_block *sum = curseg->sum_blk;
int i;
mtx_lock(&curseg->curseg_mutex);
@@ -1868,16 +1875,16 @@
}
for (i = 0; i < nats_in_cursum(sum); i++) {
- struct nat_entry *ne;
- struct f2fs_nat_entry raw_ne;
+ nat_entry *ne;
+ f2fs_nat_entry raw_ne;
nid_t nid = LeToCpu(nid_in_journal(sum, i));
raw_ne = nat_in_journal(sum, i);
retry:
WriteLock(&nm_i->nat_tree_lock);
- ne = __LookupNatCache(nm_i, nid);
+ ne = LookupNatCache(nm_i, nid);
if (ne) {
- __set_nat_cache_dirty(nm_i, ne);
+ SetNatCacheDirty(nm_i, ne);
WriteUnlock(&nm_i->nat_tree_lock);
continue;
}
@@ -1886,10 +1893,10 @@
WriteUnlock(&nm_i->nat_tree_lock);
goto retry;
}
- nat_set_blkaddr(ne, LeToCpu(raw_ne.block_addr));
- nat_set_ino(ne, LeToCpu(raw_ne.ino));
- nat_set_version(ne, raw_ne.version);
- __set_nat_cache_dirty(nm_i, ne);
+ NatSetBlkaddr(ne, LeToCpu(raw_ne.block_addr));
+ NatSetIno(ne, LeToCpu(raw_ne.ino));
+ NatSetVersion(ne, raw_ne.version);
+ SetNatCacheDirty(nm_i, ne);
WriteUnlock(&nm_i->nat_tree_lock);
}
update_nats_in_cursum(sum, -i);
@@ -1902,12 +1909,12 @@
*/
void NodeMgr::FlushNatEntries() {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
- struct curseg_info *curseg = fs_->Segmgr().CURSEG_I(&sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ f2fs_nm_info *nm_i = NM_I(&sbi);
+ curseg_info *curseg = fs_->Segmgr().CURSEG_I(&sbi, CURSEG_HOT_DATA);
+ f2fs_summary_block *sum = curseg->sum_blk;
list_node_t *cur, *n;
Page *page = nullptr;
- struct f2fs_nat_block *nat_blk = nullptr;
+ f2fs_nat_block *nat_blk = nullptr;
nid_t start_nid = 0, end_nid = 0;
bool flushed;
@@ -1920,16 +1927,16 @@
/* 1) flush dirty nat caches */
list_for_every_safe(&nm_i->dirty_nat_entries, cur, n) {
- struct nat_entry *ne;
+ nat_entry *ne;
nid_t nid;
- struct f2fs_nat_entry raw_ne;
+ f2fs_nat_entry raw_ne;
int offset = -1;
block_t old_blkaddr, new_blkaddr;
ne = containerof(cur, nat_entry, list);
- nid = nat_get_nid(ne);
+ nid = NatGetNid(ne);
- if (nat_get_blkaddr(ne) == NEW_ADDR)
+ if (NatGetBlkaddr(ne) == NEW_ADDR)
continue;
if (flushed)
goto to_nat_page;
@@ -1951,7 +1958,7 @@
F2fsPutPage(page, 1);
page = nullptr;
}
- start_nid = START_NID(nid);
+ start_nid = StartNid(nid);
end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
/*
@@ -1959,18 +1966,18 @@
* count, mapped and lock
*/
page = GetNextNatPage(start_nid);
- nat_blk = (f2fs_nat_block *)PageAddress(page);
+ nat_blk = static_cast<f2fs_nat_block *>(PageAddress(page));
}
ZX_ASSERT(nat_blk);
raw_ne = nat_blk->entries[nid - start_nid];
old_blkaddr = LeToCpu(raw_ne.block_addr);
flush_now:
- new_blkaddr = nat_get_blkaddr(ne);
+ new_blkaddr = NatGetBlkaddr(ne);
- raw_ne.ino = CpuToLe(nat_get_ino(ne));
+ raw_ne.ino = CpuToLe(NatGetIno(ne));
raw_ne.block_addr = CpuToLe(new_blkaddr);
- raw_ne.version = nat_get_version(ne);
+ raw_ne.version = NatGetVersion(ne);
if (offset < 0) {
nat_blk->entries[nid - start_nid] = raw_ne;
@@ -1979,16 +1986,16 @@
nid_in_journal(sum, offset) = CpuToLe(nid);
}
- if (nat_get_blkaddr(ne) == NULL_ADDR) {
+ if (NatGetBlkaddr(ne) == NULL_ADDR) {
WriteLock(&nm_i->nat_tree_lock);
- __DelFromNatCache(nm_i, ne);
+ DelFromNatCache(nm_i, ne);
WriteUnlock(&nm_i->nat_tree_lock);
/* We can reuse this freed nid at this point */
AddFreeNid(NM_I(&sbi), nid);
} else {
WriteLock(&nm_i->nat_tree_lock);
- __clear_nat_cache_dirty(nm_i, ne);
+ ClearNatCacheDirty(nm_i, ne);
ne->checkpointed = true;
WriteUnlock(&nm_i->nat_tree_lock);
}
@@ -2004,15 +2011,15 @@
F2fsPutPage(page, 1);
/* 2) shrink nat caches if necessary */
- TryToFreeNats(nm_i->nat_cnt - NM_WOUT_THRESHOLD);
+ TryToFreeNats(nm_i->nat_cnt - kNmWoutThreshold);
}
zx_status_t NodeMgr::InitNodeManager() {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(&sbi);
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
- unsigned char *version_bitmap;
- unsigned int nat_segs, nat_blocks;
+ f2fs_super_block *sb_raw = F2FS_RAW_SUPER(&sbi);
+ f2fs_nm_info *nm_i = NM_I(&sbi);
+ uint8_t *version_bitmap;
+ uint32_t nat_segs, nat_blocks;
nm_i->nat_blkaddr = LeToCpu(sb_raw->nat_blkaddr);
/* segment_count_nat includes pair segment so divide to 2. */
@@ -2044,7 +2051,7 @@
if (!nm_i->nat_bitmap)
return ZX_ERR_NO_MEMORY;
- version_bitmap = static_cast<unsigned char *>(__bitmap_ptr(&sbi, NAT_BITMAP));
+ version_bitmap = static_cast<uint8_t *>(__bitmap_ptr(&sbi, NAT_BITMAP));
if (!version_bitmap)
return ZX_ERR_INVALID_ARGS;
@@ -2072,11 +2079,11 @@
void NodeMgr::DestroyNodeManager() {
f2fs_sb_info &sbi = fs_->SbInfo();
- struct f2fs_nm_info *nm_i = NM_I(&sbi);
- [[maybe_unused]] struct free_nid *i, *next_i;
- [[maybe_unused]] struct nat_entry *natvec[NATVEC_SIZE];
- [[maybe_unused]] nid_t nid = 0;
- [[maybe_unused]] unsigned int found;
+ f2fs_nm_info *nm_i = NM_I(&sbi);
+ free_nid *i, *next_i;
+ nat_entry *natvec[kNatvecSize];
+ nid_t nid = 0;
+ uint32_t found;
if (!nm_i)
return;
@@ -2084,8 +2091,8 @@
/* destroy free nid list */
SpinLock(&nm_i->free_nid_list_lock);
list_for_every_entry_safe (&nm_i->free_nid_list, i, next_i, free_nid, list) {
- ZX_ASSERT(i->state != NID_ALLOC);
- __DelFromFreeNidList(i);
+ ZX_ASSERT(i->state != static_cast<int>(NidState::kNidAlloc));
+ DelFromFreeNidList(i);
nm_i->fcnt--;
}
ZX_ASSERT(!nm_i->fcnt);
@@ -2093,12 +2100,12 @@
/* destroy nat cache */
WriteLock(&nm_i->nat_tree_lock);
- while ((found = __GangLookupNatCache(nm_i, nid, NATVEC_SIZE, natvec))) {
- unsigned idx;
+ while ((found = GangLookupNatCache(nm_i, nid, kNatvecSize, natvec))) {
+ uint32_t idx;
for (idx = 0; idx < found; idx++) {
- struct nat_entry *e = natvec[idx];
- nid = nat_get_nid(e) + 1;
- __DelFromNatCache(nm_i, e);
+ nat_entry *e = natvec[idx];
+ nid = NatGetNid(e) + 1;
+ DelFromNatCache(nm_i, e);
}
}
// TODO: Check nm_i->nat_cnt
@@ -2111,21 +2118,21 @@
delete nm_i;
}
-int NodeMgr::CreateNodeManagerCaches() {
+zx_status_t NodeMgr::CreateNodeManagerCaches() {
#if 0 // porting needed
// nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
- // sizeof(struct nat_entry), NULL);
+ // sizeof(nat_entry), NULL);
// if (!nat_entry_slab)
// return -ENOMEM;
// free_nid_slab = f2fs_kmem_cache_create("free_nid",
- // sizeof(struct free_nid), NULL);
+ // sizeof(free_nid), NULL);
// if (!free_nid_slab) {
// kmem_cache_destroy(nat_entry_slab);
// return -ENOMEM;
// }
#endif
- return 0;
+ return ZX_OK;
}
void NodeMgr::DestroyNodeManagerCaches() {
diff --git a/node.h b/node.h
index 625fa7b..47f86f5 100644
--- a/node.h
+++ b/node.h
@@ -5,69 +5,75 @@
#ifndef F2FS_NODE_H_
#define F2FS_NODE_H_
+#include "third_party/f2fs/f2fs_types.h"
#include "zircon/types.h"
namespace f2fs {
/* start node id of a node block dedicated to the given node id */
-#define START_NID(nid) ((nid / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)
+inline uint32_t StartNid(uint32_t nid) {
+ return (nid / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK;
+}
/* node block offset on the NAT area dedicated to the given start node id */
-#define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)
+//#define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)
+inline uint64_t NatBlockOffset(uint32_t start_nid) {
+ return start_nid / NAT_ENTRY_PER_BLOCK;
+}
/* # of pages to perform readahead before building free nids */
-#define FREE_NID_PAGES 4
+constexpr int kFreeNidPages = 4;
/* maximum # of free node ids to produce during build_free_nids */
-#define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)
+constexpr int kMaxFreeNids = NAT_ENTRY_PER_BLOCK * kFreeNidPages;
/* maximum readahead size for node during getting data blocks */
-#define MAX_RA_NODE 128
+constexpr int kMaxRaNode = 128;
/* maximum cached nat entries to manage memory footprint */
-#define NM_WOUT_THRESHOLD (64 * NAT_ENTRY_PER_BLOCK)
+constexpr uint32_t kNmWoutThreshold = 64 * NAT_ENTRY_PER_BLOCK;
/* vector size for gang look-up from nat cache that consists of radix tree */
-#define NATVEC_SIZE 64
+constexpr uint32_t kNatvecSize = 64;
/*
* For node information
*/
struct node_info {
- nid_t nid; /* node id */
- nid_t ino; /* inode number of the node's owner */
- block_t blk_addr; /* block address of the node */
- unsigned char version; /* version of the node */
+ nid_t nid = 0; /* node id */
+ nid_t ino = 0; /* inode number of the node's owner */
+ block_t blk_addr = 0; /* block address of the node */
+ uint8_t version = 0; /* version of the node */
};
struct nat_entry {
list_node_t list; /* for clean or dirty nat list */
- bool checkpointed; /* whether it is checkpointed or not */
- struct node_info ni; /* in-memory node information */
+ bool checkpointed = false; /* whether it is checkpointed or not */
+ node_info ni; /* in-memory node information */
};
-#define nat_get_nid(nat) (nat->ni.nid)
-#define nat_set_nid(nat, n) (nat->ni.nid = n)
-#define nat_get_blkaddr(nat) (nat->ni.blk_addr)
-#define nat_set_blkaddr(nat, b) (nat->ni.blk_addr = b)
-#define nat_get_ino(nat) (nat->ni.ino)
-#define nat_set_ino(nat, i) (nat->ni.ino = i)
-#define nat_get_version(nat) (nat->ni.version)
-#define nat_set_version(nat, v) (nat->ni.version = v)
+inline uint32_t NatGetNid(nat_entry *nat) { return nat->ni.nid; }
+inline void NatSetNid(nat_entry *nat, nid_t n) { nat->ni.nid = n; }
+inline block_t NatGetBlkaddr(nat_entry *nat) { return nat->ni.blk_addr; }
+inline void NatSetBlkaddr(nat_entry *nat, block_t b) { nat->ni.blk_addr = b; }
+inline uint32_t NatGetIno(nat_entry *nat) { return nat->ni.ino; }
+inline void NatSetIno(nat_entry *nat, uint32_t i) { nat->ni.ino = i; }
+inline uint8_t NatGetVersion(nat_entry *nat) { return nat->ni.version; }
+inline void NatSetVersion(nat_entry *nat, uint8_t v) { nat->ni.version = v; }
-#define inc_node_version(version) (++version)
+inline uint8_t IncNodeVersion(uint8_t version) { return ++version; }
/*
* For free nid mangement
*/
-enum nid_state {
- NID_NEW, /* newly added to free nid list */
- NID_ALLOC /* it is allocated */
+enum class NidState {
+ kNidNew = 0, /* newly added to free nid list */
+ kNidAlloc, /* it is allocated */
};
struct free_nid {
list_node_t list; /* for free node id list */
- nid_t nid; /* node id */
- int state; /* in use or not: NID_NEW or NID_ALLOC */
+ nid_t nid = 0; /* node id */
+ int state = 0; /* in use or not: kNidNew or kNidAlloc */
};
class NodeMgr {
@@ -90,27 +96,27 @@
zx_status_t NextFreeNid(nid_t *nid);
void NodeInfoFromRawNat(node_info *ni, f2fs_nat_entry *raw_ne);
- static int RestoreNodeSummary(F2fs *fs, unsigned int segno, struct f2fs_summary_block *sum);
+ static zx_status_t RestoreNodeSummary(F2fs *fs, uint32_t segno, f2fs_summary_block *sum);
zx_status_t BuildNodeManager();
void DestroyNodeManager();
- zx_status_t ReadNodePage(Page *page, unsigned long nid, int type);
+ zx_status_t ReadNodePage(Page *page, nid_t nid, int type);
zx_status_t GetNodePage(pgoff_t nid, Page **out);
- zx_status_t GetDnodeOfData(struct dnode_of_data *dn, pgoff_t index, int ro);
+ zx_status_t GetDnodeOfData(dnode_of_data *dn, pgoff_t index, int ro);
- void FillNodeFooter(Page *page, nid_t nid, nid_t ino, unsigned int ofs, bool reset);
+ void FillNodeFooter(Page *page, nid_t nid, nid_t ino, uint32_t ofs, bool reset);
void CopyNodeFooter(Page *dst, Page *src);
- unsigned int OfsOfNode(Page *node_page);
+ uint32_t OfsOfNode(Page *node_page);
static int IsColdNode(Page *page);
static int IsColdFile(VnodeF2fs *vnode);
static int IsColdData(Page *page);
- unsigned char IsDentDnode(Page *page);
- unsigned char IsFsyncDnode(Page *page);
+ uint8_t IsDentDnode(Page *page);
+ uint8_t IsFsyncDnode(Page *page);
- unsigned long long CpverOfNode(Page *node_page);
+ uint64_t CpverOfNode(Page *node_page);
void FillNodeFooterBlkaddr(Page *page, block_t blkaddr);
static block_t NextBlkaddrOfNode(Page *node_page);
@@ -119,42 +125,42 @@
nid_t NidOfNode(Page *node_page);
bool IS_DNODE(Page *node_page);
- void GetNodeInfo(nid_t nid, struct node_info *ni);
- int SyncNodePages(nid_t ino, struct WritebackControl *wbc);
- void SyncInodePage(struct dnode_of_data *dn);
+ void GetNodeInfo(nid_t nid, node_info *ni);
+ int SyncNodePages(nid_t ino, WritebackControl *wbc);
+ void SyncInodePage(dnode_of_data *dn);
bool AllocNid(nid_t *nid);
void AllocNidFailed(nid_t nid);
void AllocNidDone(nid_t nid);
- int TruncateInodeBlocks(VnodeF2fs *vnode, pgoff_t from);
+ zx_status_t TruncateInodeBlocks(VnodeF2fs *vnode, pgoff_t from);
- int RemoveInodePage(VnodeF2fs *vnode);
+ zx_status_t RemoveInodePage(VnodeF2fs *vnode);
zx_status_t NewInodePage(Dir *parent, VnodeF2fs *child);
int IsCheckpointedNode(nid_t nid);
void ClearColdData(Page *page);
- void DecValidNodeCount(struct f2fs_sb_info *sbi, VnodeF2fs *vnode, unsigned int count);
+ void DecValidNodeCount(f2fs_sb_info *sbis, VnodeF2fs *vnode, uint32_t count);
void GetNatBitmap(void *addr);
bool FlushNatsInJournal();
void FlushNatEntries();
- int F2fsWriteNodePage(Page *page, struct WritebackControl *wbc);
- int F2fsWriteNodePages(struct address_space *mapping, struct WritebackControl *wbc);
+ int F2fsWriteNodePage(Page *page, WritebackControl *wbc);
+ int F2fsWriteNodePages(struct address_space *mapping, WritebackControl *wbc);
zx_status_t RecoverInodePage(Page *page);
- void RecoverNodePage(Page *page, struct f2fs_summary *sum, struct node_info *ni,
+ void RecoverNodePage(Page *page, f2fs_summary *sum, node_info *ni,
block_t new_blkaddr);
private:
F2fs *fs_;
// Inline functions
- bool inc_valid_node_count(struct f2fs_sb_info *sbi, VnodeF2fs *vnode, unsigned int count);
+ bool inc_valid_node_count(f2fs_sb_info *sbi, VnodeF2fs *vnode, uint32_t count);
pgoff_t CurrentNatAddr(nid_t start);
bool IsUpdatedNatPage(nid_t start);
pgoff_t NextNatAddr(pgoff_t block_addr);
- void SetToNextNat(struct f2fs_nm_info *nm_i, nid_t start_nid);
+ void SetToNextNat(f2fs_nm_info *nm_i, nid_t start_nid);
void SetNid(Page *p, int off, nid_t nid, bool i);
nid_t GetNid(Page *p, int off, bool i);
@@ -170,23 +176,23 @@
Page *GetCurrentNatPage(nid_t nid);
Page *GetNextNatPage(nid_t nid);
void RaNatPages(nid_t nid);
- struct nat_entry *__LookupNatCache(struct f2fs_nm_info *nm_i, nid_t n);
- unsigned int __GangLookupNatCache(struct f2fs_nm_info *nm_i, nid_t start, unsigned int nr,
- struct nat_entry **ep);
- void __DelFromNatCache(struct f2fs_nm_info *nm_i, struct nat_entry *e);
+ nat_entry *LookupNatCache(f2fs_nm_info *nm_i, nid_t n);
+ uint32_t GangLookupNatCache(f2fs_nm_info *nm_i, nid_t start, uint32_t nr,
+ nat_entry **ep);
+ void DelFromNatCache(f2fs_nm_info *nm_i, nat_entry *e);
- struct nat_entry *GrabNatEntry(struct f2fs_nm_info *nm_i, nid_t nid);
- void CacheNatEntry(struct f2fs_nm_info *nm_i, nid_t nid, struct f2fs_nat_entry *ne);
- void SetNodeAddr(struct node_info *ni, block_t new_blkaddr);
+ nat_entry *GrabNatEntry(f2fs_nm_info *nm_i, nid_t nid);
+ void CacheNatEntry(f2fs_nm_info *nm_i, nid_t nid, f2fs_nat_entry *ne);
+ void SetNodeAddr(node_info *ni, block_t new_blkaddr);
int TryToFreeNats(int nr_shrink);
- int GetNodePath(long block, int offset[4], unsigned int noffset[4]);
- void TruncateNode(struct dnode_of_data *dn);
- int TruncateDnode(struct dnode_of_data *dn);
- int TruncateNodes(struct dnode_of_data *dn, unsigned int nofs, int ofs, int depth);
- int TruncatePartialNodes(struct dnode_of_data *dn, struct f2fs_inode *ri, int *offset, int depth);
+ int GetNodePath(long block, int offset[4], uint32_t noffset[4]);
+ void TruncateNode(dnode_of_data *dn);
+ zx_status_t TruncateDnode(dnode_of_data *dn);
+ zx_status_t TruncateNodes(dnode_of_data *dn, uint32_t nofs, int ofs, int depth);
+ zx_status_t TruncatePartialNodes(dnode_of_data *dn, f2fs_inode *ri, int *offset, int depth);
- zx_status_t NewNodePage(struct dnode_of_data *dn, unsigned int ofs, Page **out);
+ zx_status_t NewNodePage(dnode_of_data *dn, uint32_t ofs, Page **out);
#if 0 // porting needed
void RaNodePage(nid_t nid);
#endif
@@ -194,21 +200,21 @@
#if 0 // porting needed
int F2fsWriteNodePages(struct address_space *mapping,
- struct WritebackControl *wbc);
+ WritebackControl *wbc);
int F2fsSetNodePageDirty(Page *page);
- void F2fsInvalidateNodePage(Page *page, unsigned long offset);
+ void F2fsInvalidateNodePage(Page *page, uint64_t offset);
int F2fsReleaseNodePage(Page *page, gfp_t wait);
#endif
- struct free_nid *__LookupFreeNidList(nid_t n, list_node_t *head);
- void __DelFromFreeNidList(struct free_nid *i);
- int AddFreeNid(struct f2fs_nm_info *nm_i, nid_t nid);
- void RemoveFreeNid(struct f2fs_nm_info *nm_i, nid_t nid);
- int ScanNatPage(struct f2fs_nm_info *nm_i, Page *nat_page, nid_t start_nid);
+ free_nid *LookupFreeNidList(nid_t n, list_node_t *head);
+ void DelFromFreeNidList(free_nid *i);
+ int AddFreeNid(f2fs_nm_info *nm_i, nid_t nid);
+ void RemoveFreeNid(f2fs_nm_info *nm_i, nid_t nid);
+ int ScanNatPage(f2fs_nm_info *nm_i, Page *nat_page, nid_t start_nid);
void BuildFreeNids();
zx_status_t InitNodeManager();
- int CreateNodeManagerCaches();
+ zx_status_t CreateNodeManagerCaches();
void DestroyNodeManagerCaches();
};