// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <string.h>
#include <sys/stat.h>
#include <threads.h>
#include <zircon/types.h>
#include "f2fs.h"
#include "zircon/errors.h"
#include "zircon/types.h"
namespace f2fs {
#define __set_nat_cache_dirty(nm_i, ne) list_move_tail(&nm_i->dirty_nat_entries, &ne->list)
#define __clear_nat_cache_dirty(nm_i, ne) list_move_tail(&nm_i->nat_entries, &ne->list)
inline void NodeMgr::NodeInfoFromRawNat(node_info *ni, f2fs_nat_entry *raw_ne) {
ni->ino = LeToCpu(raw_ne->ino);
ni->blk_addr = LeToCpu(raw_ne->block_addr);
ni->version = raw_ne->version;
}
inline bool NodeMgr::inc_valid_node_count(struct f2fs_sb_info *sbi, VnodeF2fs *vnode,
unsigned int count) {
block_t valid_block_count;
unsigned int valid_node_count;
SpinLock(&sbi->stat_lock);
valid_block_count = sbi->total_valid_block_count + (block_t)count;
sbi->alloc_valid_block_count += (block_t)count;
valid_node_count = sbi->total_valid_node_count + count;
if (valid_block_count > sbi->user_block_count) {
SpinUnlock(&sbi->stat_lock);
return false;
}
if (valid_node_count > sbi->total_node_count) {
SpinUnlock(&sbi->stat_lock);
return false;
}
if (vnode)
vnode->i_blocks_ += count;
sbi->total_valid_node_count = valid_node_count;
sbi->total_valid_block_count = valid_block_count;
SpinUnlock(&sbi->stat_lock);
return true;
}
/*
* inline functions
*/
zx_status_t NodeMgr::NextFreeNid(nid_t *nid) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
struct free_nid *fnid;
if (nm_i->fcnt <= 0)
return ZX_ERR_OUT_OF_RANGE;
SpinLock(&nm_i->free_nid_list_lock);
fnid = containerof(nm_i->free_nid_list.next, struct free_nid, list);
*nid = fnid->nid;
SpinUnlock(&nm_i->free_nid_list_lock);
return ZX_OK;
}
void NodeMgr::GetNatBitmap(void *addr) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
memcpy(nm_i->nat_prev_bitmap, nm_i->nat_bitmap, nm_i->bitmap_size);
}
inline pgoff_t NodeMgr::CurrentNatAddr(nid_t start) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
pgoff_t block_off;
pgoff_t block_addr;
int seg_off;
block_off = NAT_BLOCK_OFFSET(start);
seg_off = block_off >> sbi.log_blocks_per_seg;
block_addr = (pgoff_t)(nm_i->nat_blkaddr + (seg_off << sbi.log_blocks_per_seg << 1) +
(block_off & ((1 << sbi.log_blocks_per_seg) - 1)));
if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
block_addr += sbi.blocks_per_seg;
return block_addr;
}
inline bool NodeMgr::IsUpdatedNatPage(nid_t start) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
pgoff_t block_off;
block_off = NAT_BLOCK_OFFSET(start);
return (f2fs_test_bit(block_off, nm_i->nat_bitmap) ^
f2fs_test_bit(block_off, nm_i->nat_prev_bitmap));
}
inline pgoff_t NodeMgr::NextNatAddr(pgoff_t block_addr) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
block_addr -= nm_i->nat_blkaddr;
if ((block_addr >> sbi.log_blocks_per_seg) % 2)
block_addr -= sbi.blocks_per_seg;
else
block_addr += sbi.blocks_per_seg;
return block_addr + nm_i->nat_blkaddr;
}
inline void NodeMgr::SetToNextNat(struct f2fs_nm_info *nm_i, nid_t start_nid) {
unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);
if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
f2fs_clear_bit(block_off, nm_i->nat_bitmap);
else
f2fs_set_bit(block_off, nm_i->nat_bitmap);
}
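/*
 * Worked example (illustrative, not part of the driver): each NAT block is
 * kept as two copies in a pair of adjacent segments, and nat_bitmap selects
 * which copy is current. Assuming hypothetical values nat_blkaddr = 100 and
 * log_blocks_per_seg = 9 (512 blocks per segment), for block_off = 520:
 *   seg_off   = 520 >> 9 = 1
 *   base addr = 100 + (1 << 9 << 1) + (520 & 511) = 100 + 1024 + 8 = 1132
 *   bit clear -> current copy at 1132, next copy at 1132 + 512 = 1644
 *   bit set   -> current copy at 1644, next copy at 1132
 * SetToNextNat() toggles the bit, so CurrentNatAddr() and NextNatAddr()
 * swap roles at the next checkpoint.
 */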
inline void NodeMgr::FillNodeFooter(Page *page, nid_t nid, nid_t ino, unsigned int ofs,
bool reset) {
void *kaddr = PageAddress(page);
struct f2fs_node *rn = (struct f2fs_node *)kaddr;
if (reset)
memset(rn, 0, sizeof(*rn));
rn->footer.nid = CpuToLe(nid);
rn->footer.ino = CpuToLe(ino);
rn->footer.flag = CpuToLe(ofs << OFFSET_BIT_SHIFT);
}
void NodeMgr::CopyNodeFooter(Page *dst, Page *src) {
void *src_addr = PageAddress(src);
void *dst_addr = PageAddress(dst);
struct f2fs_node *src_rn = (struct f2fs_node *)src_addr;
struct f2fs_node *dst_rn = (struct f2fs_node *)dst_addr;
memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}
void NodeMgr::FillNodeFooterBlkaddr(Page *page, block_t blkaddr) {
// TODO: IMPL
// struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_checkpoint *ckpt = F2FS_CKPT(&sbi);
void *kaddr = PageAddress(page);
struct f2fs_node *rn = (struct f2fs_node *)kaddr;
rn->footer.cp_ver = ckpt->checkpoint_ver;
rn->footer.next_blkaddr = blkaddr;
}
inline nid_t NodeMgr::InoOfNode(Page *node_page) {
void *kaddr = PageAddress(node_page);
struct f2fs_node *rn = (struct f2fs_node *)kaddr;
return LeToCpu(rn->footer.ino);
}
inline nid_t NodeMgr::NidOfNode(Page *node_page) {
void *kaddr = PageAddress(node_page);
struct f2fs_node *rn = (struct f2fs_node *)kaddr;
return LeToCpu(rn->footer.nid);
}
unsigned int NodeMgr::OfsOfNode(Page *node_page) {
void *kaddr = PageAddress(node_page);
struct f2fs_node *rn = (struct f2fs_node *)kaddr;
unsigned flag = LeToCpu(rn->footer.flag);
return flag >> OFFSET_BIT_SHIFT;
}
unsigned long long NodeMgr::CpverOfNode(Page *node_page) {
void *kaddr = PageAddress(node_page);
struct f2fs_node *rn = (struct f2fs_node *)kaddr;
return LeToCpu(rn->footer.cp_ver);
}
block_t NodeMgr::NextBlkaddrOfNode(Page *node_page) {
void *kaddr = PageAddress(node_page);
struct f2fs_node *rn = (struct f2fs_node *)kaddr;
return LeToCpu(rn->footer.next_blkaddr);
}
/*
* f2fs assigns the following node offsets described as (num).
* N = NIDS_PER_BLOCK
*
 * Inode block (0)
 *   |- direct node (1)
 *   |- direct node (2)
 *   |- indirect node (3)
 *   |     `- direct node (4 => 4 + N - 1)
 *   |- indirect node (4 + N)
 *   |     `- direct node (5 + N => 5 + 2N - 1)
 *   `- double indirect node (5 + 2N)
 *         `- indirect node (6 + 2N)
 *               `- direct node (x(N + 1))
*/
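/*
 * Worked example (illustrative): assuming N = NIDS_PER_BLOCK = 1018, the
 * two direct nodes get offsets 1 and 2, offset 3 is the first indirect
 * node (its direct children span 4..1021), offset 1022 (= 4 + N) is the
 * second indirect node (children 1023..2040), and offset 2041 (= 5 + 2N)
 * is the double indirect node. Below it, offsets 2042 + k * (N + 1) are
 * indirect nodes and all other offsets are direct nodes, which is exactly
 * the pattern IS_DNODE() tests.
 */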
bool NodeMgr::IS_DNODE(Page *node_page) {
unsigned int ofs = OfsOfNode(node_page);
if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK || ofs == 5 + 2 * NIDS_PER_BLOCK)
return false;
if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
ofs -= 6 + 2 * NIDS_PER_BLOCK;
/* offsets of the form (6 + 2N) + k * (N + 1) are indirect nodes, not dnodes */
if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
return false;
}
return true;
}
inline void NodeMgr::SetNid(Page *p, int off, nid_t nid, bool i) {
struct f2fs_node *rn = static_cast<struct f2fs_node *>(PageAddress(p));
WaitOnPageWriteback(p);
if (i) {
rn->i.i_nid[off - NODE_DIR1_BLOCK] = CpuToLe(nid);
} else {
rn->in.nid[off] = CpuToLe(nid);
}
#if 0 // porting needed
// set_page_dirty(p);
#endif
FlushDirtyNodePage(fs_, p);
}
inline nid_t NodeMgr::GetNid(Page *p, int off, bool i) {
struct f2fs_node *rn = (struct f2fs_node *)PageAddress(p);
if (i)
return LeToCpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
return LeToCpu(rn->in.nid[off]);
}
/*
* Coldness identification:
* - Mark cold files in f2fs_inode_info
* - Mark cold node blocks in their node footer
* - Mark cold data pages in page cache
*/
int NodeMgr::IsColdFile(VnodeF2fs *vnode) { return vnode->fi_.i_advise & FADVISE_COLD_BIT; }
int NodeMgr::IsColdData(Page *page) {
#if 0 // porting needed
// return PageChecked(page);
#endif
return 0;
}
#if 0 // porting needed
inline void NodeMgr::SetColdData(Page *page) {
// SetPageChecked(page);
}
#endif
void NodeMgr::ClearColdData(Page *page) {
#if 0 // porting needed
// ClearPageChecked(page);
#endif
}
int NodeMgr::IsColdNode(Page *page) {
void *kaddr = PageAddress(page);
struct f2fs_node *rn = (struct f2fs_node *)kaddr;
unsigned int flag = LeToCpu(rn->footer.flag);
return flag & (0x1 << COLD_BIT_SHIFT);
}
unsigned char NodeMgr::IsFsyncDnode(Page *page) {
void *kaddr = PageAddress(page);
struct f2fs_node *rn = (struct f2fs_node *)kaddr;
unsigned int flag = LeToCpu(rn->footer.flag);
return flag & (0x1 << FSYNC_BIT_SHIFT);
}
unsigned char NodeMgr::IsDentDnode(Page *page) {
void *kaddr = PageAddress(page);
struct f2fs_node *rn = (struct f2fs_node *)kaddr;
unsigned int flag = LeToCpu(rn->footer.flag);
return flag & (0x1 << DENT_BIT_SHIFT);
}
inline void NodeMgr::SetColdNode(VnodeF2fs *vnode, Page *page) {
struct f2fs_node *rn = (struct f2fs_node *)PageAddress(page);
unsigned int flag = LeToCpu(rn->footer.flag);
if (S_ISDIR(vnode->i_mode_))
flag &= ~(0x1 << COLD_BIT_SHIFT);
else
flag |= (0x1 << COLD_BIT_SHIFT);
rn->footer.flag = CpuToLe(flag);
}
void NodeMgr::SetFsyncMark(Page *page, int mark) {
void *kaddr = PageAddress(page);
struct f2fs_node *rn = (struct f2fs_node *)kaddr;
unsigned int flag = LeToCpu(rn->footer.flag);
if (mark)
flag |= (0x1 << FSYNC_BIT_SHIFT);
else
flag &= ~(0x1 << FSYNC_BIT_SHIFT);
rn->footer.flag = CpuToLe(flag);
}
void NodeMgr::SetDentryMark(Page *page, int mark) {
void *kaddr = PageAddress(page);
struct f2fs_node *rn = (struct f2fs_node *)kaddr;
unsigned int flag = LeToCpu(rn->footer.flag);
if (mark)
flag |= (0x1 << DENT_BIT_SHIFT);
else
flag &= ~(0x1 << DENT_BIT_SHIFT);
rn->footer.flag = CpuToLe(flag);
}
inline void NodeMgr::DecValidNodeCount(struct f2fs_sb_info *sbi, VnodeF2fs *vnode,
unsigned int count) {
SpinLock(&sbi->stat_lock);
// TODO: IMPL
ZX_ASSERT(!(sbi->total_valid_block_count < count));
ZX_ASSERT(!(sbi->total_valid_node_count < count));
ZX_ASSERT(!(vnode->i_blocks_ < count));
vnode->i_blocks_ -= count;
sbi->total_valid_node_count -= count;
sbi->total_valid_block_count -= static_cast<block_t>(count);
SpinUnlock(&sbi->stat_lock);
}
/*
* Functions
*/
NodeMgr::NodeMgr(F2fs *fs) : fs_(fs) {}
void NodeMgr::ClearNodePageDirty(Page *page) {
f2fs_sb_info &sbi = fs_->SbInfo();
#if 0 // porting needed
// struct address_space *mapping = page->mapping;
// unsigned int long flags;
#endif
if (PageDirty(page)) {
#if 0 // porting needed
// TODO: IMPL
// SpinLock_irqsave(&mapping->tree_lock, flags);
// radix_tree_tag_clear(&mapping->page_tree,
// page_index(page),
// PAGECACHE_TAG_DIRTY);
// SpinUnlock_irqrestore(&mapping->tree_lock, flags);
#endif
ClearPageDirtyForIo(page);
DecPageCount(&sbi, F2FS_DIRTY_NODES);
}
ClearPageUptodate(page);
}
Page *NodeMgr::GetCurrentNatPage(nid_t nid) {
pgoff_t index = CurrentNatAddr(nid);
return fs_->GetMetaPage(index);
}
Page *NodeMgr::GetNextNatPage(nid_t nid) {
Page *src_page;
Page *dst_page;
pgoff_t src_off;
pgoff_t dst_off;
void *src_addr;
void *dst_addr;
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
src_off = CurrentNatAddr(nid);
dst_off = NextNatAddr(src_off);
/* get current nat block page with lock */
src_page = fs_->GetMetaPage(src_off);
/* Dirty src_page means that it is already the new target NAT page. */
#if 0 // porting needed
// if (PageDirty(src_page))
#endif
if (IsUpdatedNatPage(nid))
return src_page;
dst_page = fs_->GrabMetaPage(dst_off);
src_addr = PageAddress(src_page);
dst_addr = PageAddress(dst_page);
memcpy(dst_addr, src_addr, kPageCacheSize);
#if 0 // porting needed
// set_page_dirty(dst_page);
#endif
F2fsPutPage(src_page, 1);
SetToNextNat(nm_i, nid);
return dst_page;
}
/**
* Readahead NAT pages
*/
void NodeMgr::RaNatPages(nid_t nid) {
// struct address_space *mapping = sbi_->meta_inode->i_mapping;
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
struct Page *page;
pgoff_t index;
int i;
for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
if (nid >= nm_i->max_nid)
nid = 0;
index = CurrentNatAddr(nid);
page = GrabCachePage(nullptr, F2FS_META_INO(&sbi), index);
if (!page)
continue;
if (VnodeF2fs::Readpage(fs_, page, index, 0 /*READ*/)) {
F2fsPutPage(page, 1);
continue;
}
#if 0 // porting needed
// page_cache_release(page);
#endif
F2fsPutPage(page, 1);
}
}
struct nat_entry *NodeMgr::__LookupNatCache(struct f2fs_nm_info *nm_i, nid_t n) {
// TODO: IMPL
// TODO: need to be modified to use radix tree
list_node_t *cur, *next;
list_for_every_safe(&nm_i->dirty_nat_entries, cur, next) {
struct nat_entry *e = containerof(cur, nat_entry, list);
if (nat_get_nid(e) == n) {
return e;
}
}
list_for_every_safe(&nm_i->nat_entries, cur, next) {
struct nat_entry *e = containerof(cur, nat_entry, list);
if (nat_get_nid(e) == n) {
return e;
}
}
#if 0 // porting needed
// return radix_tree_lookup(&nm_i->nat_root, n);
#endif
return nullptr;
}
unsigned int NodeMgr::__GangLookupNatCache(struct f2fs_nm_info *nm_i, nid_t start, unsigned int nr,
struct nat_entry **ep) {
// TODO: IMPL
// TODO: need to be modified to use radix tree
unsigned int ret = 0;
for (unsigned int i = 0; i < nr; i++) {
nid_t cur_nid = start + i;
ep[ret] = __LookupNatCache(nm_i, cur_nid);
if (ep[ret]) {
if (++ret == nr) {
break;
}
}
}
return ret;
#if 0 // porting needed
// return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
// return 0;
#endif
}
void NodeMgr::__DelFromNatCache(struct f2fs_nm_info *nm_i, struct nat_entry *e) {
#if 0 // porting needed
// radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
#endif
list_delete(&e->list);
nm_i->nat_cnt--;
#if 0 // porting needed
// kmem_cache_free(nat_entry_slab, e);
#endif
delete e;
}
int NodeMgr::IsCheckpointedNode(nid_t nid) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
struct nat_entry *e;
int is_cp = 1;
ReadLock(&nm_i->nat_tree_lock);
e = __LookupNatCache(nm_i, nid);
if (e && !e->checkpointed)
is_cp = 0;
ReadUnlock(&nm_i->nat_tree_lock);
return is_cp;
}
struct nat_entry *NodeMgr::GrabNatEntry(struct f2fs_nm_info *nm_i, nid_t nid) {
struct nat_entry *new_entry;
#if 0 // porting needed (kmem_cache_alloc)
// new_entry = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
#endif
new_entry = new nat_entry;
if (!new_entry)
return nullptr;
#if 0 // porting needed
// if (radix_tree_insert(&nm_i->nat_root, nid, new_entry)) {
// delete new_entry;
// return NULL;
// }
#endif
memset(new_entry, 0, sizeof(struct nat_entry));
nat_set_nid(new_entry, nid);
list_add_tail(&nm_i->nat_entries, &new_entry->list);
nm_i->nat_cnt++;
return new_entry;
}
void NodeMgr::CacheNatEntry(struct f2fs_nm_info *nm_i, nid_t nid, struct f2fs_nat_entry *ne) {
struct nat_entry *e;
retry:
WriteLock(&nm_i->nat_tree_lock);
e = __LookupNatCache(nm_i, nid);
if (!e) {
e = GrabNatEntry(nm_i, nid);
if (!e) {
WriteUnlock(&nm_i->nat_tree_lock);
goto retry;
}
nat_set_blkaddr(e, LeToCpu(ne->block_addr));
nat_set_ino(e, LeToCpu(ne->ino));
nat_set_version(e, ne->version);
e->checkpointed = true;
}
WriteUnlock(&nm_i->nat_tree_lock);
}
void NodeMgr::SetNodeAddr(struct node_info *ni, block_t new_blkaddr) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
struct nat_entry *e;
retry:
WriteLock(&nm_i->nat_tree_lock);
e = __LookupNatCache(nm_i, ni->nid);
if (!e) {
e = GrabNatEntry(nm_i, ni->nid);
if (!e) {
WriteUnlock(&nm_i->nat_tree_lock);
goto retry;
}
e->ni = *ni;
e->checkpointed = true;
ZX_ASSERT(ni->blk_addr != NEW_ADDR);
} else if (new_blkaddr == NEW_ADDR) {
/*
 * When a nid is reallocated, the previous nat entry can remain in the
 * nat cache, so reinitialize it with new information.
 */
e->ni = *ni;
ZX_ASSERT(ni->blk_addr == NULL_ADDR);
}
if (new_blkaddr == NEW_ADDR)
e->checkpointed = false;
/* sanity check */
ZX_ASSERT(!(nat_get_blkaddr(e) != ni->blk_addr));
ZX_ASSERT(!(nat_get_blkaddr(e) == NULL_ADDR && new_blkaddr == NULL_ADDR));
ZX_ASSERT(!(nat_get_blkaddr(e) == NEW_ADDR && new_blkaddr == NEW_ADDR));
ZX_ASSERT(!(nat_get_blkaddr(e) != NEW_ADDR && nat_get_blkaddr(e) != NULL_ADDR &&
new_blkaddr == NEW_ADDR));
/* increment the version number, as the node is removed */
if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
unsigned char version = nat_get_version(e);
nat_set_version(e, inc_node_version(version));
}
/* change address */
nat_set_blkaddr(e, new_blkaddr);
__set_nat_cache_dirty(nm_i, e);
WriteUnlock(&nm_i->nat_tree_lock);
}
int NodeMgr::TryToFreeNats(int nr_shrink) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
if (nm_i->nat_cnt < 2 * NM_WOUT_THRESHOLD)
return 0;
WriteLock(&nm_i->nat_tree_lock);
while (nr_shrink && !list_is_empty(&nm_i->nat_entries)) {
struct nat_entry *ne;
// ne = list_first_entry(&nm_i->nat_entries,
// struct nat_entry, list);
ne = containerof((&nm_i->nat_entries)->next, struct nat_entry, list);
__DelFromNatCache(nm_i, ne);
nr_shrink--;
}
WriteUnlock(&nm_i->nat_tree_lock);
return nr_shrink;
}
/**
 * This function always returns success.
 */
void NodeMgr::GetNodeInfo(nid_t nid, struct node_info *ni) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
struct curseg_info *curseg = SegMgr::CURSEG_I(&sbi, CURSEG_HOT_DATA);
struct f2fs_summary_block *sum = curseg->sum_blk;
nid_t start_nid = START_NID(nid);
struct f2fs_nat_block *nat_blk;
struct Page *page = NULL;
struct f2fs_nat_entry ne;
struct nat_entry *e;
int i;
ni->nid = nid;
/* Check nat cache */
ReadLock(&nm_i->nat_tree_lock);
e = __LookupNatCache(nm_i, nid);
if (e) {
ni->ino = nat_get_ino(e);
ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e);
}
ReadUnlock(&nm_i->nat_tree_lock);
if (e)
return;
/* Check current segment summary */
mtx_lock(&curseg->curseg_mutex);
i = SegMgr::LookupJournalInCursum(sum, NAT_JOURNAL, nid, 0);
if (i >= 0) {
ne = nat_in_journal(sum, i);
NodeInfoFromRawNat(ni, &ne);
}
mtx_unlock(&curseg->curseg_mutex);
if (i >= 0)
goto cache;
/* Fill node_info from nat page */
page = GetCurrentNatPage(start_nid);
nat_blk = (struct f2fs_nat_block *)PageAddress(page);
ne = nat_blk->entries[nid - start_nid];
NodeInfoFromRawNat(ni, &ne);
F2fsPutPage(page, 1);
cache:
/* cache nat entry */
CacheNatEntry(NM_I(&sbi), nid, &ne);
}
/**
 * The maximum depth is four.
 * Offset[0] holds the raw inode offset.
 */
int NodeMgr::GetNodePath(long block, int offset[4], unsigned int noffset[4]) {
const long direct_index = ADDRS_PER_INODE;
const long direct_blks = ADDRS_PER_BLOCK;
const long dptrs_per_blk = NIDS_PER_BLOCK;
const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
int n = 0;
int level = 0;
noffset[0] = 0;
if (block < direct_index) {
offset[n++] = block;
level = 0;
goto got;
}
block -= direct_index;
if (block < direct_blks) {
offset[n++] = NODE_DIR1_BLOCK;
noffset[n] = 1;
offset[n++] = block;
level = 1;
goto got;
}
block -= direct_blks;
if (block < direct_blks) {
offset[n++] = NODE_DIR2_BLOCK;
noffset[n] = 2;
offset[n++] = block;
level = 1;
goto got;
}
block -= direct_blks;
if (block < indirect_blks) {
offset[n++] = NODE_IND1_BLOCK;
noffset[n] = 3;
offset[n++] = block / direct_blks;
noffset[n] = 4 + offset[n - 1];
offset[n++] = block % direct_blks;
level = 2;
goto got;
}
block -= indirect_blks;
if (block < indirect_blks) {
offset[n++] = NODE_IND2_BLOCK;
noffset[n] = 4 + dptrs_per_blk;
offset[n++] = block / direct_blks;
noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
offset[n++] = block % direct_blks;
level = 2;
goto got;
}
block -= indirect_blks;
if (block < dindirect_blks) {
offset[n++] = NODE_DIND_BLOCK;
noffset[n] = 5 + (dptrs_per_blk * 2);
offset[n++] = block / indirect_blks;
noffset[n] = 6 + (dptrs_per_blk * 2) + offset[n - 1] * (dptrs_per_blk + 1);
offset[n++] = (block / direct_blks) % dptrs_per_blk;
noffset[n] = 7 + (dptrs_per_blk * 2) + offset[n - 2] * (dptrs_per_blk + 1) + offset[n - 1];
offset[n++] = block % direct_blks;
level = 3;
goto got;
} else {
ZX_ASSERT(0);
}
got:
return level;
}
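/*
 * Usage sketch (illustrative, not part of the driver): the numbers assume
 * the classic f2fs layout constants ADDRS_PER_INODE = 923 and
 * ADDRS_PER_BLOCK = NIDS_PER_BLOCK = 1018; `node_mgr` is a hypothetical
 * NodeMgr pointer.
 */
#if 0 // illustrative example
// int offset[4];
// unsigned int noffset[4];
// /* block 100 fits in the inode itself: level 0, offset[0] = 100 */
// int level = node_mgr->GetNodePath(100, offset, noffset);
// /* block 1000 lands in the first direct node: level 1,
//    offset[0] = NODE_DIR1_BLOCK, offset[1] = 1000 - 923 = 77 */
// level = node_mgr->GetNodePath(1000, offset, noffset);
#endif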
/*
* Caller should call f2fs_put_dnode(dn).
*/
zx_status_t NodeMgr::GetDnodeOfData(struct dnode_of_data *dn, pgoff_t index, int ro) {
f2fs_sb_info &sbi = fs_->SbInfo();
Page *npage[4];
Page *parent;
int offset[4];
unsigned int noffset[4];
nid_t nids[4];
int level, i;
zx_status_t err = 0;
level = GetNodePath(index, offset, noffset);
nids[0] = dn->vnode->Ino();
npage[0] = nullptr;
err = GetNodePage(nids[0], &npage[0]);
if (err)
return err;
parent = npage[0];
nids[1] = GetNid(parent, offset[0], true);
dn->inode_page = npage[0];
dn->inode_page_locked = true;
/* get indirect or direct nodes */
for (i = 1; i <= level; i++) {
bool done = false;
if (!nids[i] && !ro) {
mutex_lock_op(&sbi, NODE_NEW);
/* alloc new node */
if (!AllocNid(&(nids[i]))) {
mutex_unlock_op(&sbi, NODE_NEW);
err = ZX_ERR_NO_SPACE;
goto release_pages;
}
dn->nid = nids[i];
npage[i] = nullptr;
err = NewNodePage(dn, noffset[i], &npage[i]);
if (err) {
AllocNidFailed(nids[i]);
mutex_unlock_op(&sbi, NODE_NEW);
goto release_pages;
}
SetNid(parent, offset[i - 1], nids[i], i == 1);
AllocNidDone(nids[i]);
mutex_unlock_op(&sbi, NODE_NEW);
done = true;
} else if (ro && i == level && level > 1) {
#if 0 // porting needed
// err = GetNodePageRa(parent, offset[i - 1], &npage[i]);
// if (err) {
// goto release_pages;
// }
// done = true;
#endif
}
if (i == 1) {
dn->inode_page_locked = false;
#if 0 // porting needed
// unlock_page(parent);
#endif
} else {
F2fsPutPage(parent, 1);
}
if (!done) {
npage[i] = nullptr;
err = GetNodePage(nids[i], &npage[i]);
if (err) {
F2fsPutPage(npage[0], 0);
goto release_out;
}
}
if (i < level) {
parent = npage[i];
nids[i + 1] = GetNid(parent, offset[i], false);
}
}
dn->nid = nids[level];
dn->ofs_in_node = offset[level];
dn->node_page = npage[level];
dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
#ifdef F2FS_BU_DEBUG
std::cout << "NodeMgr::GetDnodeOfData"
<< ", dn->nid=" << dn->nid
<< ", dn->node_page=" << dn->node_page
<< ", dn->ofs_in_node=" << dn->ofs_in_node
<< ", dn->data_blkaddr=" << dn->data_blkaddr
<< std::endl;
#endif
return 0;
release_pages:
F2fsPutPage(parent, 1);
if (i > 1)
F2fsPutPage(npage[0], 0);
release_out:
dn->inode_page = nullptr;
dn->node_page = nullptr;
return err;
}
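/*
 * Caller sketch (illustrative): looking up the block address backing file
 * offset `index` of `vnode`; `node_mgr` is a hypothetical NodeMgr pointer.
 */
#if 0 // illustrative example
// struct dnode_of_data dn;
// SetNewDnode(&dn, vnode, nullptr, nullptr, 0);
// if (node_mgr->GetDnodeOfData(&dn, index, 1 /* read-only */) == ZX_OK) {
//   block_t blkaddr = dn.data_blkaddr;  /* NULL_ADDR means a hole */
//   /* release dn.node_page / dn.inode_page, as f2fs_put_dnode() did upstream */
// }
#endif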
void NodeMgr::TruncateNode(struct dnode_of_data *dn) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct node_info ni;
GetNodeInfo(dn->nid, &ni);
ZX_ASSERT(ni.blk_addr != NULL_ADDR);
if (ni.blk_addr != NULL_ADDR)
fs_->Segmgr().InvalidateBlocks(ni.blk_addr);
/* Deallocate node address */
DecValidNodeCount(&sbi, dn->vnode, 1);
SetNodeAddr(&ni, NULL_ADDR);
if (dn->nid == dn->vnode->Ino()) {
fs_->RemoveOrphanInode(dn->nid);
dec_valid_inode_count(&sbi);
} else {
SyncInodePage(dn);
}
ClearNodePageDirty(dn->node_page);
F2FS_SET_SB_DIRT(&sbi);
F2fsPutPage(dn->node_page, 1);
dn->node_page = nullptr;
}
int NodeMgr::TruncateDnode(struct dnode_of_data *dn) {
Page *page = nullptr;
zx_status_t err = 0;
if (dn->nid == 0)
return 1;
/* get direct node */
err = fs_->Nodemgr().GetNodePage(dn->nid, &page);
if (err && err == ZX_ERR_NOT_FOUND)
return 1;
else if (err)
return err;
/* Make dnode_of_data for parameter */
dn->node_page = page;
dn->ofs_in_node = 0;
dn->vnode->TruncateDataBlocks(dn);
TruncateNode(dn);
return 1;
}
int NodeMgr::TruncateNodes(struct dnode_of_data *dn, unsigned int nofs, int ofs, int depth) {
struct dnode_of_data rdn = *dn;
Page *page = nullptr;
struct f2fs_node *rn;
nid_t child_nid;
unsigned int child_nofs;
int freed = 0;
int i, ret;
zx_status_t err = 0;
if (dn->nid == 0)
return NIDS_PER_BLOCK + 1;
err = fs_->Nodemgr().GetNodePage(dn->nid, &page);
if (err)
return err;
rn = (struct f2fs_node *)PageAddress(page);
if (depth < 3) {
for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
child_nid = LeToCpu(rn->in.nid[i]);
if (child_nid == 0)
continue;
rdn.nid = child_nid;
ret = TruncateDnode(&rdn);
if (ret < 0)
goto out_err;
SetNid(page, i, 0, false);
}
} else {
child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
for (i = ofs; i < NIDS_PER_BLOCK; i++) {
child_nid = LeToCpu(rn->in.nid[i]);
if (child_nid == 0) {
child_nofs += NIDS_PER_BLOCK + 1;
continue;
}
rdn.nid = child_nid;
ret = TruncateNodes(&rdn, child_nofs, 0, depth - 1);
if (ret == (NIDS_PER_BLOCK + 1)) {
SetNid(page, i, 0, false);
child_nofs += ret;
} else if (ret < 0 && ret != -ENOENT) {
goto out_err;
}
}
freed = child_nofs;
}
if (!ofs) {
/* remove current indirect node */
dn->node_page = page;
TruncateNode(dn);
freed++;
} else {
F2fsPutPage(page, 1);
}
return freed;
out_err:
F2fsPutPage(page, 1);
return ret;
}
int NodeMgr::TruncatePartialNodes(struct dnode_of_data *dn, struct f2fs_inode *ri, int *offset,
int depth) {
Page *pages[2];
nid_t nid[3];
nid_t child_nid;
zx_status_t err = 0;
int i;
int idx = depth - 2;
nid[0] = LeToCpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
if (!nid[0])
return 0;
/* get indirect nodes in the path */
for (i = 0; i < depth - 1; i++) {
/* reference count will be increased */
pages[i] = nullptr;
err = fs_->Nodemgr().GetNodePage(nid[i], &pages[i]);
if (err) {
depth = i + 1;
goto fail;
}
nid[i + 1] = GetNid(pages[i], offset[i + 1], false);
}
/* free direct nodes linked to a partial indirect node */
for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
child_nid = GetNid(pages[idx], i, false);
if (!child_nid)
continue;
dn->nid = child_nid;
err = TruncateDnode(dn);
if (err < 0)
goto fail;
SetNid(pages[idx], i, 0, false);
}
if (offset[depth - 1] == 0) {
dn->node_page = pages[idx];
dn->nid = nid[idx];
TruncateNode(dn);
} else {
F2fsPutPage(pages[idx], 1);
}
offset[idx]++;
offset[depth - 1] = 0;
fail:
for (i = depth - 3; i >= 0; i--)
F2fsPutPage(pages[i], 1);
return err;
}
/**
* All the block addresses of data and nodes should be nullified.
*/
int NodeMgr::TruncateInodeBlocks(VnodeF2fs *vnode, pgoff_t from) {
int cont = 1;
int level, offset[4], noffset[4];
unsigned int nofs;
struct f2fs_node *rn;
struct dnode_of_data dn;
Page *page = nullptr;
zx_status_t err = 0;
level = GetNodePath(from, offset, (unsigned int *)noffset);
err = GetNodePage(vnode->Ino(), &page);
if (err)
return err;
SetNewDnode(&dn, vnode, page, nullptr, 0);
#if 0 // porting needed
// unlock_page(page);
#endif
rn = (f2fs_node *)PageAddress(page);
switch (level) {
case 0:
case 1:
nofs = noffset[1];
break;
case 2:
nofs = noffset[1];
if (!offset[level - 1])
goto skip_partial;
err = TruncatePartialNodes(&dn, &rn->i, offset, level);
if (err < 0 && err != -ENOENT)
goto fail;
nofs += 1 + NIDS_PER_BLOCK;
break;
case 3:
nofs = 5 + 2 * NIDS_PER_BLOCK;
if (!offset[level - 1])
goto skip_partial;
err = TruncatePartialNodes(&dn, &rn->i, offset, level);
if (err < 0 && err != -ENOENT)
goto fail;
break;
default:
ZX_ASSERT(0);
}
skip_partial:
while (cont) {
dn.nid = LeToCpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
switch (offset[0]) {
case NODE_DIR1_BLOCK:
case NODE_DIR2_BLOCK:
err = TruncateDnode(&dn);
break;
case NODE_IND1_BLOCK:
case NODE_IND2_BLOCK:
err = TruncateNodes(&dn, nofs, offset[1], 2);
break;
case NODE_DIND_BLOCK:
err = TruncateNodes(&dn, nofs, offset[1], 3);
cont = 0;
break;
default:
ZX_ASSERT(0);
}
if (err < 0 && err != -ENOENT)
goto fail;
if (offset[1] == 0 && rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
#if 0 // porting needed
// lock_page(page);
#endif
WaitOnPageWriteback(page);
rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
#if 0 // porting needed
// set_page_dirty(page);
#endif
FlushDirtyNodePage(fs_, page);
#if 0 // porting needed
// unlock_page(page);
#endif
}
offset[1] = 0;
offset[0]++;
nofs += err;
}
fail:
F2fsPutPage(page, 0);
return err > 0 ? 0 : err;
}
int NodeMgr::RemoveInodePage(VnodeF2fs *vnode) {
f2fs_sb_info &sbi = fs_->SbInfo();
Page *page = nullptr;
nid_t ino = vnode->Ino();
struct dnode_of_data dn;
zx_status_t err = 0;
mutex_lock_op(&sbi, NODE_TRUNC);
err = GetNodePage(ino, &page);
if (err) {
mutex_unlock_op(&sbi, NODE_TRUNC);
return err;
}
if (vnode->Inode().i_xattr_nid) {
nid_t nid = vnode->Inode().i_xattr_nid;
Page *npage = nullptr;
err = GetNodePage(nid, &npage);
if (err) {
mutex_unlock_op(&sbi, NODE_TRUNC);
return err;
}
#if 0 // porting needed
// vnode->Inode().i_xattr_nid = 0;
#endif
SetNewDnode(&dn, vnode, page, npage, nid);
dn.inode_page_locked = true;
TruncateNode(&dn);
}
if (vnode->i_blocks_ == 1) {
/* internally calls f2fs_put_page() */
SetNewDnode(&dn, vnode, page, page, ino);
TruncateNode(&dn);
} else if (vnode->i_blocks_ == 0) {
struct node_info ni;
GetNodeInfo(vnode->Ino(), &ni);
/* called after f2fs_new_inode() is failed */
ZX_ASSERT(ni.blk_addr == NULL_ADDR);
F2fsPutPage(page, 1);
} else {
ZX_ASSERT(0);
}
mutex_unlock_op(&sbi, NODE_TRUNC);
return 0;
}
zx_status_t NodeMgr::NewInodePage(Dir *parent, VnodeF2fs *child) {
f2fs_sb_info &sbi = fs_->SbInfo();
Page *page = nullptr;
struct dnode_of_data dn;
zx_status_t err = 0;
/* allocate inode page for new inode */
SetNewDnode(&dn, child, nullptr, nullptr, child->Ino());
mutex_lock_op(&sbi, NODE_NEW);
err = NewNodePage(&dn, 0, &page);
if (err) {
mutex_unlock_op(&sbi, NODE_NEW);
return err;
}
parent->InitDentInode(child, page);
mutex_unlock_op(&sbi, NODE_NEW);
F2fsPutPage(page, 1);
return ZX_OK;
}
zx_status_t NodeMgr::NewNodePage(struct dnode_of_data *dn, unsigned int ofs, Page **out) {
f2fs_sb_info &sbi = fs_->SbInfo();
//[[maybe_unused]] struct address_space *mapping = sbi.node_inode->i_mapping;
struct node_info old_ni, new_ni;
Page *page = nullptr;
int err;
if (is_inode_flag_set(&dn->vnode->fi_, FI_NO_ALLOC))
return ZX_ERR_ACCESS_DENIED;
page = GrabCachePage(nullptr, F2FS_NODE_INO(&sbi), dn->nid);
if (!page)
return ZX_ERR_NO_MEMORY;
GetNodeInfo(dn->nid, &old_ni);
SetPageUptodate(page);
FillNodeFooter(page, dn->nid, dn->vnode->Ino(), ofs, true);
/* Reinitialize old_ni with new node page */
ZX_ASSERT(old_ni.blk_addr == NULL_ADDR);
new_ni = old_ni;
new_ni.ino = dn->vnode->Ino();
if (!inc_valid_node_count(&sbi, dn->vnode, 1)) {
err = ZX_ERR_NO_SPACE;
goto fail;
}
SetNodeAddr(&new_ni, NEW_ADDR);
dn->node_page = page;
SyncInodePage(dn);
#if 0 // porting needed
// set_page_dirty(page);
#endif
FlushDirtyNodePage(fs_, page);
SetColdNode(dn->vnode, page);
if (ofs == 0)
inc_valid_inode_count(&sbi);
*out = page;
return ZX_OK;
fail:
F2fsPutPage(page, 1);
return err;
}
zx_status_t NodeMgr::ReadNodePage(Page *page, unsigned long nid, int type) {
node_info ni;
GetNodeInfo(nid, &ni);
if (ni.blk_addr == NULL_ADDR)
return ZX_ERR_NOT_FOUND;
if (ni.blk_addr == NEW_ADDR) {
#ifdef F2FS_BU_DEBUG
std::cout << "NodeMgr::ReadNodePage, Read New address..." << std::endl;
#endif
return ZX_OK;
}
return VnodeF2fs::Readpage(fs_, page, ni.blk_addr, type);
}
/**
* Readahead a node page
*/
#if 0 // porting needed
void NodeMgr::RaNodePage(nid_t nid) {
// TODO: IMPL Read ahead
}
#endif
zx_status_t NodeMgr::GetNodePage(pgoff_t nid, Page **out) {
int err;
Page *page = nullptr;
f2fs_sb_info &sbi = fs_->SbInfo();
#if 0 // porting needed
// struct address_space *mapping = sbi_->node_inode->i_mapping;
#endif
page = GrabCachePage(nullptr, F2FS_NODE_INO(&sbi), nid);
if (!page)
return ZX_ERR_NO_MEMORY;
err = ReadNodePage(page, nid, kReadSync);
if (err) {
F2fsPutPage(page, 1);
return err;
}
ZX_ASSERT(nid == NidOfNode(page));
#if 0 // porting needed
// mark_page_accessed(page);
#endif
*out = page;
return ZX_OK;
}
/**
 * Returns a locked page for the desired node page,
 * and reads ahead MAX_RA_NODE node pages.
 */
Page *NodeMgr::GetNodePageRa(Page *parent, int start) {
// TODO: IMPL Read ahead
return nullptr;
}
void NodeMgr::SyncInodePage(struct dnode_of_data *dn) {
if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
dn->vnode->UpdateInode(dn->node_page);
} else if (dn->inode_page) {
#if 0 // porting needed
// if (!dn->inode_page_locked)
//   lock_page(dn->inode_page);
#endif
dn->vnode->UpdateInode(dn->inode_page);
#if 0 // porting needed
// if (!dn->inode_page_locked)
// unlock_page(dn->inode_page);
#endif
} else {
dn->vnode->WriteInode(nullptr);
}
}
int NodeMgr::SyncNodePages(nid_t ino, struct WritebackControl *wbc) {
#if 0 // porting needed
// f2fs_sb_info &sbi = fs_->SbInfo();
// //struct address_space *mapping = sbi.node_inode->i_mapping;
// pgoff_t index, end;
// // TODO: IMPL
// //struct pagevec pvec;
// int step = ino ? 2 : 0;
// int nwritten = 0, wrote = 0;
// // TODO: IMPL
// //pagevec_init(&pvec, 0);
// next_step:
// index = 0;
// end = LONG_MAX;
// while (index <= end) {
// int i, nr_pages;
// TODO: IMPL
// nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
// PAGECACHE_TAG_DIRTY,
// min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
// if (nr_pages == 0)
// break;
// for (i = 0; i < nr_pages; i++) {
// struct page *page = pvec.pages[i];
// /*
// * flushing sequence with step:
// * 0. indirect nodes
// * 1. dentry dnodes
// * 2. file dnodes
// */
// if (step == 0 && IS_DNODE(page))
// continue;
// if (step == 1 && (!IS_DNODE(page) ||
// IsColdNode(page)))
// continue;
// if (step == 2 && (!IS_DNODE(page) ||
// !IsColdNode(page)))
// continue;
// /*
// * If an fsync mode,
// * we should not skip writing node pages.
// */
// if (ino && InoOfNode(page) == ino)
// lock_page(page);
// else if (!trylock_page(page))
// continue;
// if (unlikely(page->mapping != mapping)) {
// continue_unlock:
// unlock_page(page);
// continue;
// }
// if (ino && InoOfNode(page) != ino)
// goto continue_unlock;
// if (!PageDirty(page)) {
// /* someone wrote it for us */
// goto continue_unlock;
// }
// if (!ClearPageDirtyForIo(page))
// goto continue_unlock;
// /* called by fsync() */
// if (ino && IS_DNODE(page)) {
// int mark = !IsCheckpointedNode(sbi, ino);
// SetFsyncMark(page, 1);
// if (IS_INODE(page))
// SetDentryMark(page, mark);
// nwritten++;
// } else {
// SetFsyncMark(page, 0);
// SetDentryMark(page, 0);
// }
// mapping->a_ops->writepage(page, wbc);
// wrote++;
// if (--wbc->nr_to_write == 0)
// break;
// }
// pagevec_release(&pvec);
// cond_resched();
// if (wbc->nr_to_write == 0) {
// step = 2;
// break;
// }
// }
// if (step < 2) {
// step++;
// goto next_step;
// }
// if (wrote)
// f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);
// return nwritten;
#endif
return 0;
}
zx_status_t NodeMgr::F2fsWriteNodePage(Page *page, struct WritebackControl *wbc) {
f2fs_sb_info &sbi = fs_->SbInfo();
nid_t nid;
unsigned int nofs;
block_t new_addr;
struct node_info ni;
#if 0 // porting needed
// if (wbc->for_reclaim) {
// DecPageCount(&sbi, F2FS_DIRTY_NODES);
// wbc->pages_skipped++;
// // set_page_dirty(page);
// FlushDirtyNodePage(fs_, page);
// return kAopWritepageActivate;
// }
#endif
WaitOnPageWriteback(page);
mutex_lock_op(&sbi, NODE_WRITE);
/* get old block addr of this node page */
nid = NidOfNode(page);
nofs = OfsOfNode(page);
ZX_ASSERT(page->index == nid);
GetNodeInfo(nid, &ni);
/* This page is already truncated */
if (ni.blk_addr == NULL_ADDR) {
mutex_unlock_op(&sbi, NODE_WRITE);
return ZX_OK;
}
SetPageWriteback(page);
/* insert node offset */
fs_->Segmgr().WriteNodePage(page, nid, ni.blk_addr, &new_addr);
SetNodeAddr(&ni, new_addr);
DecPageCount(&sbi, F2FS_DIRTY_NODES);
mutex_unlock_op(&sbi, NODE_WRITE);
// TODO: IMPL
// unlock_page(page);
return ZX_OK;
}
#if 0 // porting needed
int NodeMgr::F2fsWriteNodePages(struct address_space *mapping, struct WritebackControl *wbc) {
// struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
// struct block_device *bdev = sbi->sb->s_bdev;
// long nr_to_write = wbc->nr_to_write;
// if (wbc->for_kupdate)
// return 0;
// if (get_pages(sbi, F2FS_DIRTY_NODES) == 0)
// return 0;
// if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
// write_checkpoint(sbi, false, false);
// return 0;
// }
// /* if mounting is failed, skip writing node pages */
// wbc->nr_to_write = bio_get_nr_vecs(bdev);
// sync_node_pages(sbi, 0, wbc);
// wbc->nr_to_write = nr_to_write -
// (bio_get_nr_vecs(bdev) - wbc->nr_to_write);
// return 0;
return 0;
}
#endif
#if 0 // porting needed
int NodeMgr::F2fsSetNodePageDirty(Page *page) {
f2fs_sb_info &sbi = fs_->SbInfo();
SetPageUptodate(page);
if (!PageDirty(page)) {
// __set_page_dirty_nobuffers(page);
FlushDirtyNodePage(fs_, page);
inc_page_count(&sbi, F2FS_DIRTY_NODES);
// SetPagePrivate(page);
return 1;
}
return 0;
}
#endif
#if 0 // porting needed
void NodeMgr::F2fsInvalidateNodePage(Page *page, unsigned long offset) {
f2fs_sb_info &sbi = fs_->SbInfo();
if (PageDirty(page))
DecPageCount(&sbi, F2FS_DIRTY_NODES);
ClearPagePrivate(page);
}
#endif
#if 0 // porting needed
int F2fsReleaseNodePage(Page *page, gfp_t wait) {
ClearPagePrivate(page);
return 0;
}
#endif
struct free_nid *NodeMgr::__LookupFreeNidList(nid_t n, list_node_t *head) {
list_node_t *this_list;
struct free_nid *i = nullptr;
list_for_every(head, this_list) {
i = containerof(this_list, struct free_nid, list);
if (i->nid == n)
break;
i = nullptr;
}
return i;
}
void NodeMgr::__DelFromFreeNidList(struct free_nid *i) {
list_delete(&i->list);
#if 0 // porting needed
// kmem_cache_free(free_nid_slab, i);
#endif
delete i;
}
int NodeMgr::AddFreeNid(struct f2fs_nm_info *nm_i, nid_t nid) {
struct free_nid *i;
if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
return 0;
retry:
#if 0 // porting needed (kmem_cache_alloc)
// i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
#endif
i = new free_nid;
if (!i) {
#if 0 // porting needed
// cond_resched();
#endif
goto retry;
}
i->nid = nid;
i->state = NID_NEW;
SpinLock(&nm_i->free_nid_list_lock);
if (__LookupFreeNidList(nid, &nm_i->free_nid_list)) {
SpinUnlock(&nm_i->free_nid_list_lock);
#if 0 // porting needed
// kmem_cache_free(free_nid_slab, i);
#endif
delete i;
return 0;
}
list_add_tail(&nm_i->free_nid_list, &i->list);
nm_i->fcnt++;
SpinUnlock(&nm_i->free_nid_list_lock);
return 1;
}
void NodeMgr::RemoveFreeNid(struct f2fs_nm_info *nm_i, nid_t nid) {
struct free_nid *i;
SpinLock(&nm_i->free_nid_list_lock);
i = __LookupFreeNidList(nid, &nm_i->free_nid_list);
if (i && i->state == NID_NEW) {
__DelFromFreeNidList(i);
nm_i->fcnt--;
}
SpinUnlock(&nm_i->free_nid_list_lock);
}
int NodeMgr::ScanNatPage(struct f2fs_nm_info *nm_i, Page *nat_page, nid_t start_nid) {
struct f2fs_nat_block *nat_blk = static_cast<f2fs_nat_block *>(PageAddress(nat_page));
block_t blk_addr;
int fcnt = 0;
unsigned int i;
/* 0 nid should not be used */
if (start_nid == 0)
++start_nid;
i = start_nid % NAT_ENTRY_PER_BLOCK;
for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
blk_addr = LeToCpu(nat_blk->entries[i].block_addr);
ZX_ASSERT(blk_addr != NEW_ADDR);
if (blk_addr == NULL_ADDR)
fcnt += AddFreeNid(nm_i, start_nid);
}
return fcnt;
}
void NodeMgr::BuildFreeNids() {
f2fs_sb_info &sbi = fs_->SbInfo();
[[maybe_unused]] struct free_nid *fnid, *next_fnid;
struct f2fs_nm_info *nm_i = NM_I(&sbi);
struct curseg_info *curseg = SegMgr::CURSEG_I(&sbi, CURSEG_HOT_DATA);
struct f2fs_summary_block *sum = curseg->sum_blk;
nid_t nid = 0;
bool is_cycled = false;
unsigned long fcnt = 0;
int i;
nid = nm_i->next_scan_nid;
nm_i->init_scan_nid = nid;
RaNatPages(nid);
while (true) {
Page *page = GetCurrentNatPage(nid);
fcnt += ScanNatPage(nm_i, page, nid);
F2fsPutPage(page, 1);
nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
if (nid >= nm_i->max_nid) {
nid = 0;
is_cycled = true;
}
if (fcnt > MAX_FREE_NIDS)
break;
if (is_cycled && nm_i->init_scan_nid <= nid)
break;
}
nm_i->next_scan_nid = nid;
/* find free nids from current sum_pages */
mtx_lock(&curseg->curseg_mutex);
for (i = 0; i < nats_in_cursum(sum); i++) {
block_t addr = LeToCpu(nat_in_journal(sum, i).block_addr);
nid = LeToCpu(nid_in_journal(sum, i));
if (addr == NULL_ADDR) {
AddFreeNid(nm_i, nid);
} else {
RemoveFreeNid(nm_i, nid);
}
}
mtx_unlock(&curseg->curseg_mutex);
/* remove the free nids from current allocated nids */
list_for_every_entry_safe (&nm_i->free_nid_list, fnid, next_fnid, free_nid, list) {
struct nat_entry *ne;
ReadLock(&nm_i->nat_tree_lock);
ne = __LookupNatCache(nm_i, fnid->nid);
if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
RemoveFreeNid(nm_i, fnid->nid);
ReadUnlock(&nm_i->nat_tree_lock);
}
}
/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter.
 * The returned nid can be used as an ino as well as a nid when an
 * inode is created.
 */
bool NodeMgr::AllocNid(nid_t *nid) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
struct free_nid *i = nullptr;
list_node_t *this_list;
retry:
mtx_lock(&nm_i->build_lock);
if (!nm_i->fcnt) {
/* scan NAT in order to build free nid list */
BuildFreeNids();
if (!nm_i->fcnt) {
mtx_unlock(&nm_i->build_lock);
return false;
}
}
mtx_unlock(&nm_i->build_lock);
/*
* We check fcnt again since previous check is racy as
* we didn't hold free_nid_list_lock. So other thread
* could consume all of free nids.
*/
SpinLock(&nm_i->free_nid_list_lock);
if (!nm_i->fcnt) {
SpinUnlock(&nm_i->free_nid_list_lock);
goto retry;
}
ZX_ASSERT(!list_is_empty(&nm_i->free_nid_list));
list_for_every(&nm_i->free_nid_list, this_list) {
i = containerof(this_list, struct free_nid, list);
if (i->state == NID_NEW)
break;
}
ZX_ASSERT(i->state == NID_NEW);
*nid = i->nid;
i->state = NID_ALLOC;
nm_i->fcnt--;
SpinUnlock(&nm_i->free_nid_list_lock);
return true;
}
/**
 * AllocNid() should be called prior to this function.
 */
void NodeMgr::AllocNidDone(nid_t nid) {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
struct free_nid *i;
SpinLock(&nm_i->free_nid_list_lock);
i = __LookupFreeNidList(nid, &nm_i->free_nid_list);
if (i) {
ZX_ASSERT(i->state == NID_ALLOC);
__DelFromFreeNidList(i);
}
SpinUnlock(&nm_i->free_nid_list_lock);
}
/**
 * AllocNid() should be called prior to this function.
 */
void NodeMgr::AllocNidFailed(nid_t nid) {
f2fs_sb_info &sbi = fs_->SbInfo();
AllocNidDone(nid);
AddFreeNid(NM_I(&sbi), nid);
}
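/*
 * Protocol sketch (illustrative): a nid is reserved with AllocNid() and then
 * either committed with AllocNidDone() or returned to the free list with
 * AllocNidFailed(), mirroring the sequence in GetDnodeOfData() above.
 * `node_mgr` and UseNidSomehow() are hypothetical.
 */
#if 0 // illustrative example
// nid_t nid;
// if (node_mgr->AllocNid(&nid)) {       /* nid is now in NID_ALLOC state */
//   if (UseNidSomehow(nid) == ZX_OK) {
//     node_mgr->AllocNidDone(nid);      /* drop it from the free-nid list */
//   } else {
//     node_mgr->AllocNidFailed(nid);    /* put it back as NID_NEW */
//   }
// }
#endif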
void NodeMgr::RecoverNodePage(Page *page, struct f2fs_summary *sum, struct node_info *ni,
block_t new_blkaddr) {
fs_->Segmgr().RewriteNodePage(page, sum, ni->blk_addr, new_blkaddr);
SetNodeAddr(ni, new_blkaddr);
ClearNodePageDirty(page);
}
zx_status_t NodeMgr::RecoverInodePage(Page *page) {
f2fs_sb_info &sbi = fs_->SbInfo();
//[[maybe_unused]] struct address_space *mapping = sbi.node_inode->i_mapping;
struct f2fs_node *src, *dst;
nid_t ino = InoOfNode(page);
struct node_info old_ni, new_ni;
Page *ipage = nullptr;
ipage = GrabCachePage(nullptr, F2FS_NODE_INO(&sbi), ino);
if (!ipage)
return ZX_ERR_NO_MEMORY;
/* Should not use this inode from free nid list */
RemoveFreeNid(NM_I(&sbi), ino);
GetNodeInfo(ino, &old_ni);
#if 0 // porting needed
// SetPageUptodate(ipage);
#endif
FillNodeFooter(ipage, ino, ino, 0, true);
src = (struct f2fs_node *)PageAddress(page);
dst = (struct f2fs_node *)PageAddress(ipage);
memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
dst->i.i_size = 0;
dst->i.i_blocks = 1;
dst->i.i_links = 1;
dst->i.i_xattr_nid = 0;
new_ni = old_ni;
new_ni.ino = ino;
SetNodeAddr(&new_ni, NEW_ADDR);
inc_valid_inode_count(&sbi);
F2fsPutPage(ipage, 1);
return ZX_OK;
}
int NodeMgr::RestoreNodeSummary(F2fs *fs, unsigned int segno, struct f2fs_summary_block *sum) {
f2fs_sb_info &sbi = fs->SbInfo();
struct f2fs_node *rn;
struct f2fs_summary *sum_entry;
struct Page *page = nullptr;
block_t addr;
int i, last_offset;
/* scan the node segment */
last_offset = sbi.blocks_per_seg;
addr = START_BLOCK(&sbi, segno);
sum_entry = &sum->entries[0];
#if 0 // porting needed
/* allocate a temporary page to read the node */
// page = alloc_page(GFP_NOFS | __GFP_ZERO);
#endif
page = GrabCachePage(nullptr, F2FS_NODE_INO(&sbi), addr);
if (page == nullptr)
return ZX_ERR_NO_MEMORY;
#if 0 // porting needed
// lock_page(page);
#endif
for (i = 0; i < last_offset; i++, sum_entry++) {
if (VnodeF2fs::Readpage(fs, page, addr, kReadSync))
goto out;
rn = (struct f2fs_node *)PageAddress(page);
sum_entry->nid = rn->footer.nid;
sum_entry->version = 0;
sum_entry->ofs_in_node = 0;
addr++;
/*
 * In order to read the next node page,
 * we must clear the PageUptodate flag.
 */
#if 0 // porting needed
// ClearPageUptodate(page);
#endif
}
out:
#if 0 // porting needed
// unlock_page(page);
//__free_pages(page, 0);
#endif
F2fsPutPage(page, 1);
return 0;
}
bool NodeMgr::FlushNatsInJournal() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
struct curseg_info *curseg = fs_->Segmgr().CURSEG_I(&sbi, CURSEG_HOT_DATA);
struct f2fs_summary_block *sum = curseg->sum_blk;
int i;
mtx_lock(&curseg->curseg_mutex);
if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
mtx_unlock(&curseg->curseg_mutex);
return false;
}
for (i = 0; i < nats_in_cursum(sum); i++) {
struct nat_entry *ne;
struct f2fs_nat_entry raw_ne;
nid_t nid = LeToCpu(nid_in_journal(sum, i));
raw_ne = nat_in_journal(sum, i);
retry:
WriteLock(&nm_i->nat_tree_lock);
ne = __LookupNatCache(nm_i, nid);
if (ne) {
__set_nat_cache_dirty(nm_i, ne);
WriteUnlock(&nm_i->nat_tree_lock);
continue;
}
ne = GrabNatEntry(nm_i, nid);
if (!ne) {
WriteUnlock(&nm_i->nat_tree_lock);
goto retry;
}
nat_set_blkaddr(ne, LeToCpu(raw_ne.block_addr));
nat_set_ino(ne, LeToCpu(raw_ne.ino));
nat_set_version(ne, raw_ne.version);
__set_nat_cache_dirty(nm_i, ne);
WriteUnlock(&nm_i->nat_tree_lock);
}
update_nats_in_cursum(sum, -i);
mtx_unlock(&curseg->curseg_mutex);
return true;
}
/**
* This function is called during the checkpointing process.
*/
void NodeMgr::FlushNatEntries() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
struct curseg_info *curseg = fs_->Segmgr().CURSEG_I(&sbi, CURSEG_HOT_DATA);
struct f2fs_summary_block *sum = curseg->sum_blk;
list_node_t *cur, *n;
Page *page = nullptr;
struct f2fs_nat_block *nat_blk = nullptr;
nid_t start_nid = 0, end_nid = 0;
bool flushed;
flushed = FlushNatsInJournal();
#if 0 // porting needed
// if (!flushed)
#endif
mtx_lock(&curseg->curseg_mutex);
/* 1) flush dirty nat caches */
list_for_every_safe(&nm_i->dirty_nat_entries, cur, n) {
struct nat_entry *ne;
nid_t nid;
struct f2fs_nat_entry raw_ne;
int offset = -1;
block_t old_blkaddr, new_blkaddr;
ne = containerof(cur, nat_entry, list);
nid = nat_get_nid(ne);
if (nat_get_blkaddr(ne) == NEW_ADDR)
continue;
if (flushed)
goto to_nat_page;
/* if there is room for nat entries in the current summary block */
offset = fs_->Segmgr().LookupJournalInCursum(sum, NAT_JOURNAL, nid, 1);
if (offset >= 0) {
raw_ne = nat_in_journal(sum, offset);
old_blkaddr = LeToCpu(raw_ne.block_addr);
goto flush_now;
}
to_nat_page:
if (!page || (start_nid > nid || nid > end_nid)) {
if (page) {
#if 0 // porting needed
// set_page_dirty(page, fs_);
#endif
FlushDirtyMetaPage(fs_, page);
F2fsPutPage(page, 1);
page = nullptr;
}
start_nid = START_NID(nid);
end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;
/*
 * get the nat block page with the dirty flag set, its reference
 * count increased, mapped, and locked
 */
page = GetNextNatPage(start_nid);
nat_blk = (f2fs_nat_block *)PageAddress(page);
}
ZX_ASSERT(nat_blk);
raw_ne = nat_blk->entries[nid - start_nid];
old_blkaddr = LeToCpu(raw_ne.block_addr);
flush_now:
new_blkaddr = nat_get_blkaddr(ne);
raw_ne.ino = CpuToLe(nat_get_ino(ne));
raw_ne.block_addr = CpuToLe(new_blkaddr);
raw_ne.version = nat_get_version(ne);
if (offset < 0) {
nat_blk->entries[nid - start_nid] = raw_ne;
} else {
nat_in_journal(sum, offset) = raw_ne;
nid_in_journal(sum, offset) = CpuToLe(nid);
}
if (nat_get_blkaddr(ne) == NULL_ADDR) {
WriteLock(&nm_i->nat_tree_lock);
__DelFromNatCache(nm_i, ne);
WriteUnlock(&nm_i->nat_tree_lock);
/* We can reuse this freed nid at this point */
AddFreeNid(NM_I(&sbi), nid);
} else {
WriteLock(&nm_i->nat_tree_lock);
__clear_nat_cache_dirty(nm_i, ne);
ne->checkpointed = true;
WriteUnlock(&nm_i->nat_tree_lock);
}
}
#if 0 // porting needed
// if (!flushed)
#endif
mtx_unlock(&curseg->curseg_mutex);
#if 0 // porting needed
// set_page_dirty(page, fs_);
#endif
FlushDirtyMetaPage(fs_, page);
F2fsPutPage(page, 1);
/* 2) shrink nat caches if necessary */
TryToFreeNats(nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}
zx_status_t NodeMgr::InitNodeManager() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(&sbi);
struct f2fs_nm_info *nm_i = NM_I(&sbi);
unsigned char *version_bitmap;
unsigned int nat_segs, nat_blocks;
nm_i->nat_blkaddr = LeToCpu(sb_raw->nat_blkaddr);
/* segment_count_nat includes pair segment so divide to 2. */
nat_segs = LeToCpu(sb_raw->segment_count_nat) >> 1;
nat_blocks = nat_segs << LeToCpu(sb_raw->log_blocks_per_seg);
nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
nm_i->fcnt = 0;
nm_i->nat_cnt = 0;
list_initialize(&nm_i->free_nid_list);
#if 0 // porting needed (belows are of no use currently)
// INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
#endif
list_initialize(&nm_i->nat_entries);
list_initialize(&nm_i->dirty_nat_entries);
mtx_init(&nm_i->build_lock, mtx_plain);
SpinLockInit(&nm_i->free_nid_list_lock);
RwlockInit(&nm_i->nat_tree_lock);
nm_i->bitmap_size = __bitmap_size(&sbi, NAT_BITMAP);
nm_i->init_scan_nid = LeToCpu(sbi.ckpt->next_free_nid);
nm_i->next_scan_nid = LeToCpu(sbi.ckpt->next_free_nid);
nm_i->nat_bitmap = static_cast<char *>(malloc(nm_i->bitmap_size));
if (!nm_i->nat_bitmap)
return ZX_ERR_NO_MEMORY;
memset(nm_i->nat_bitmap, 0, nm_i->bitmap_size);
nm_i->nat_prev_bitmap = static_cast<char *>(malloc(nm_i->bitmap_size));
if (!nm_i->nat_prev_bitmap)
return ZX_ERR_NO_MEMORY;
memset(nm_i->nat_prev_bitmap, 0, nm_i->bitmap_size);
version_bitmap = static_cast<unsigned char *>(__bitmap_ptr(&sbi, NAT_BITMAP));
if (!version_bitmap)
return ZX_ERR_INVALID_ARGS;
/* copy version bitmap */
memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
memcpy(nm_i->nat_prev_bitmap, nm_i->nat_bitmap, nm_i->bitmap_size);
return ZX_OK;
}
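/*
 * Sizing example (illustrative): with hypothetical on-disk values
 * segment_count_nat = 8 and log_blocks_per_seg = 9, the NAT owns
 * 8 / 2 = 4 live segments (the other half holds the paired copies),
 * i.e. 4 << 9 = 2048 NAT blocks, giving max_nid = NAT_ENTRY_PER_BLOCK * 2048.
 */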
zx_status_t NodeMgr::BuildNodeManager() {
f2fs_sb_info &sbi = fs_->SbInfo();
int err;
sbi.nm_info = new f2fs_nm_info;
if (!sbi.nm_info)
return ZX_ERR_NO_MEMORY;
err = InitNodeManager();
if (err)
return err;
BuildFreeNids();
return ZX_OK;
}
void NodeMgr::DestroyNodeManager() {
f2fs_sb_info &sbi = fs_->SbInfo();
struct f2fs_nm_info *nm_i = NM_I(&sbi);
[[maybe_unused]] struct free_nid *i, *next_i;
[[maybe_unused]] struct nat_entry *natvec[NATVEC_SIZE];
[[maybe_unused]] nid_t nid = 0;
[[maybe_unused]] unsigned int found;
if (!nm_i)
return;
/* destroy free nid list */
SpinLock(&nm_i->free_nid_list_lock);
list_for_every_entry_safe (&nm_i->free_nid_list, i, next_i, free_nid, list) {
ZX_ASSERT(i->state != NID_ALLOC);
__DelFromFreeNidList(i);
nm_i->fcnt--;
}
ZX_ASSERT(!nm_i->fcnt);
SpinUnlock(&nm_i->free_nid_list_lock);
/* destroy nat cache */
WriteLock(&nm_i->nat_tree_lock);
while ((found = __GangLookupNatCache(nm_i, nid, NATVEC_SIZE, natvec))) {
unsigned idx;
for (idx = 0; idx < found; idx++) {
struct nat_entry *e = natvec[idx];
nid = nat_get_nid(e) + 1;
__DelFromNatCache(nm_i, e);
}
}
// TODO: Check nm_i->nat_cnt
//ZX_ASSERT(!nm_i->nat_cnt);
WriteUnlock(&nm_i->nat_tree_lock);
/* the bitmaps were allocated with malloc() in InitNodeManager() */
free(nm_i->nat_bitmap);
free(nm_i->nat_prev_bitmap);
sbi.nm_info = nullptr;
delete nm_i;
}
int NodeMgr::CreateNodeManagerCaches() {
#if 0 // porting needed
// nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
// sizeof(struct nat_entry), NULL);
// if (!nat_entry_slab)
// return -ENOMEM;
// free_nid_slab = f2fs_kmem_cache_create("free_nid",
// sizeof(struct free_nid), NULL);
// if (!free_nid_slab) {
// kmem_cache_destroy(nat_entry_slab);
// return -ENOMEM;
// }
#endif
return 0;
}
void NodeMgr::DestroyNodeManagerCaches() {
#if 0 // porting needed
// kmem_cache_destroy(free_nid_slab);
// kmem_cache_destroy(nat_entry_slab);
#endif
}
} // namespace f2fs