blob: d8f7c6d5e10885d7e0026922816993c425782eb2 [file] [log] [blame]
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "f2fs.h"

#include <new>

#include "zircon/errors.h"
namespace f2fs {
// Reports whether the volume still has room to perform roll-forward
// recovery: the blocks already valid plus those allocated so far must fit
// within the user-visible block count recorded in the superblock info.
bool F2fs::SpaceForRollForward() {
  f2fs_sb_info &sbi = SbInfo();
  return sbi.last_valid_block_count + sbi.alloc_valid_block_count <= sbi.user_block_count;
}
// Searches the fsync-inode list |head| for the entry whose vnode carries
// inode number |ino|. Returns the matching entry, or nullptr when |ino| is
// not being tracked.
struct fsync_inode_entry *F2fs::GetFsyncInode(list_node_t *head, nid_t ino) {
  list_node_t *iter;
  list_for_every(head, iter) {
    struct fsync_inode_entry *candidate = containerof(iter, struct fsync_inode_entry, list);
    if (candidate->vnode->Ino() == ino) {
      return candidate;
    }
  }
  return nullptr;
}
// Replays the directory entry for |vnode| recorded in its inode page |ipage|.
//
// When |ipage| is flagged as a dentry dnode, the parent directory named by
// raw_inode->i_pino is loaded; if the parent has no entry for this name yet,
// a new link is added. Returns ZX_OK (also when |ipage| carries no dentry
// information), or the Vget() error when the parent cannot be loaded.
zx_status_t F2fs::RecoverDentry(Page *ipage, VnodeF2fs *vnode) {
  struct f2fs_node *raw_node = (struct f2fs_node *)PageAddress(ipage);
  struct f2fs_inode *raw_inode = &(raw_node->i);
  fbl::RefPtr<VnodeF2fs> dir_refptr;
  Dir *dir;
  struct f2fs_dir_entry *de;
  Page *page;
  zx_status_t err = ZX_OK;
  // Only dnodes marked as carrying dentry state need dentry recovery.
  if (!Nodemgr().IsDentDnode(ipage))
    goto out;
  // Load the parent directory recorded in the raw on-disk inode.
  err = VnodeF2fs::Vget(this, LeToCpu(raw_inode->i_pino), &dir_refptr);
  if (err != ZX_OK) {
    goto out;
  }
  dir = (Dir *)dir_refptr.get();
#if 0 // porting needed
  // parent.d_inode = dir;
  // dent.d_parent = &parent;
  // dent.d_name.len = LeToCpu(raw_inode->i_namelen);
  // dent.d_name.name = raw_inode->i_name;
#endif
  de = dir->FindEntry(vnode->i_name_sp_, &page);
  if (de) {
    // The entry already exists on disk; just release the dentry page.
#if 0 // porting needed
    // kunmap(page);
#endif
    F2fsPutPage(page, 0);
  } else {
    // No entry found: re-create the link in the parent directory.
    // NOTE(review): the AddLink() status is dropped here — confirm whether a
    // failure should be propagated via |err| as in the upstream code.
    dir->AddLink(vnode->i_name_sp_, vnode);
  }
  Iput(dir);
out:
#if 0 // porting needed
  // kunmap(ipage);
#endif
  return err;
}
// Restores inode metadata (mode, size, timestamps) from the fsynced node
// page |node_page| into the in-memory |vnode|, then replays any directory
// entry recorded for it via RecoverDentry(). Returns RecoverDentry()'s
// status.
zx_status_t F2fs::RecoverInode(VnodeF2fs *vnode, Page *node_page) {
  void *kaddr = PageAddress(node_page);
  struct f2fs_node *raw_node = static_cast<struct f2fs_node *>(kaddr);
  struct f2fs_inode *raw_inode = &(raw_node->i);
  vnode->i_mode_ = LeToCpu(raw_inode->i_mode);
  vnode->i_size_ = LeToCpu(raw_inode->i_size);
  // Restore each timestamp from its own on-disk field. (The previous code
  // populated atime from i_mtime/i_mtime_nsec; the on-disk inode carries a
  // dedicated i_atime/i_atime_nsec pair, which upstream f2fs also uses.)
  vnode->i_atime_.tv_sec = LeToCpu(raw_inode->i_atime);
  vnode->i_ctime_.tv_sec = LeToCpu(raw_inode->i_ctime);
  vnode->i_mtime_.tv_sec = LeToCpu(raw_inode->i_mtime);
  vnode->i_atime_.tv_nsec = LeToCpu(raw_inode->i_atime_nsec);
  vnode->i_ctime_.tv_nsec = LeToCpu(raw_inode->i_ctime_nsec);
  vnode->i_mtime_.tv_nsec = LeToCpu(raw_inode->i_mtime_nsec);
  return RecoverDentry(node_page, vnode);
}
// Walks the chain of warm node blocks written after the last checkpoint and
// collects, on |head|, one fsync_inode_entry per inode that was fsynced and
// therefore needs roll-forward recovery.
//
// The scan stops at the first block whose checkpoint version no longer
// matches the live checkpoint. Returns ZX_OK on success (including an empty
// chain), ZX_ERR_NO_MEMORY when a page or list entry cannot be obtained, or
// the error from RecoverInode().
zx_status_t F2fs::FindFsyncDnodes(list_node_t *head) {
  f2fs_sb_info &sbi = SbInfo();
  unsigned long long cp_ver = LeToCpu(sbi.ckpt->checkpoint_ver);
  struct curseg_info *curseg;
  fbl::RefPtr<VnodeF2fs> vnode_refptr;
  Page *page;
  block_t blkaddr;
  f2fs_inode *ri;
  f2fs_node *rn;
  zx_status_t err = 0;
  /* get node pages in the current segment */
  curseg = SegMgr::CURSEG_I(&sbi, CURSEG_WARM_NODE);
  blkaddr = START_BLOCK(&sbi, curseg->segno) + curseg->next_blkoff;
  /* read node page */
  page = GrabCachePage(nullptr, F2FS_NODE_INO(sbi_), blkaddr);
  if (!page)
    return ZX_ERR_NO_MEMORY;
#if 0 // porting needed
  // lock_page(page);
#endif
  while (true) {
    struct fsync_inode_entry *entry;
    void *kaddr = PageAddress(page);
    rn = (struct f2fs_node *)kaddr;
    if (VnodeF2fs::Readpage(this, page, blkaddr, kReadSync)) {
      goto out;
    }
#ifdef F2FS_BU_DEBUG
    std::cout << "F2fs::FindFsyncDnodes, blkaddr=" << blkaddr << ", ino=" << rn->footer.ino
              << ", nid=" << rn->footer.nid << ", flag=" << rn->footer.flag
              << ", cp_ver=" << rn->footer.cp_ver << ", next_blkaddr=" << rn->footer.next_blkaddr
              << std::endl;
#endif
    /* Stop once we leave the blocks written under the current checkpoint. */
    if (cp_ver != Nodemgr().CpverOfNode(page)) {
      goto out;
    }
    if (!Nodemgr().IsFsyncDnode(page)) {
      goto next;
    }
    entry = GetFsyncInode(head, Nodemgr().InoOfNode(page));
    if (entry) {
      /* Already tracked: remember the most recent fsync dnode address. */
      entry->blkaddr = blkaddr;
      if (IS_INODE(page) && Nodemgr().IsDentDnode(page)) {
        SetInodeFlag(&entry->vnode->fi_, FI_INC_LINK);
      }
    } else {
      if (IS_INODE(page) && Nodemgr().IsDentDnode(page)) {
        if (Nodemgr().RecoverInodePage(page)) {
          err = ZX_ERR_NO_MEMORY;
          goto out;
        }
      }
      /* add this fsync inode to the list */
      // entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
      /* Use the non-throwing form so the null check below is meaningful. */
      entry = new (std::nothrow) fsync_inode_entry;
      if (!entry) {
        err = ZX_ERR_NO_MEMORY;
        goto out;
      }
      list_initialize(&entry->list);
      // vnode_refptr.reset(entry->vnode);
      // err = VnodeF2fs::Vget(this, Nodemgr().InoOfNode(page), &vnode_refptr);
      rn = (struct f2fs_node *)PageAddress(page);
      ri = &(rn->i);
      entry->vnode = vnode_refptr.get();
      if (entry->vnode == nullptr) {
        /* The entry never made it onto |head|; free it here so the caller's
         * DestroyFsyncDnodes() cannot Iput() a null vnode. */
        delete entry;
        err = ZX_ERR_NO_MEMORY;
        goto out;
      }
      entry->blkaddr = blkaddr;
      /* Link the entry only once it is fully initialized. */
      list_add_tail(&entry->list, head);
    }
    if (IS_INODE(page)) {
      err = RecoverInode(entry->vnode, page);
      if (err) {
        goto out;
      }
    }
  next:
    /* check next segment */
    blkaddr = NodeMgr::NextBlkaddrOfNode(page);
    ClearPageUptodate(page);
  }
out:
#if 0 // porting needed
  // unlock_page(page);
  //__free_pages(page, 0);
#endif
  delete page;
  return err;
}
// Tears down the fsync-inode list |head|: for every entry, drops the vnode
// reference, unlinks the entry from the list, and frees it.
void F2fs::DestroyFsyncDnodes(list_node_t *head) {
  list_node_t *this_node;
  list_node_t *next_node;
  struct fsync_inode_entry *entry;
  /* The safe iteration variant is required: each pass unlinks and frees the
   * current node, so the successor must be cached before deletion — plain
   * list_for_every would advance through freed memory. */
  list_for_every_safe(head, this_node, next_node) {
    entry = containerof(this_node, struct fsync_inode_entry, list);
    Iput(entry->vnode);
    list_delete(&entry->list);
#if 0 // porting needed
    // kmem_cache_free(fsync_entry_slab, entry);
#endif
    delete entry;
  }
}
// If |blkaddr| is still marked valid in its segment, a previous file version
// references it. This looks up which inode/offset owned the block via the
// segment summary and punches that index out (TruncateHole) so the recovered
// data does not alias a stale mapping. Best-effort: returns silently when
// the block is not valid or the owning node/vnode cannot be loaded.
void F2fs::CheckIndexInPrevNodes(block_t blkaddr) {
  f2fs_sb_info &sbi = SbInfo();
  struct seg_entry *sentry;
  unsigned int segno = GET_SEGNO(&sbi, blkaddr);
  unsigned short blkoff = GET_SEGOFF_FROM_SEG0(&sbi, blkaddr) & (sbi.blocks_per_seg - 1);
  struct f2fs_summary sum;
  nid_t ino;
  void *kaddr;
  fbl::RefPtr<VnodeF2fs> vnode_refptr;
  VnodeF2fs *vnode;
  Page *node_page = nullptr;
  block_t bidx;
  int i;
  zx_status_t err = 0;
  sentry = Segmgr().GetSegEntry(segno);
  /* Nothing to do when the block is not currently valid. */
  if (!f2fs_test_bit(blkoff, reinterpret_cast<char *>(sentry->cur_valid_map)))
    return;
  /* Get the previous summary */
  for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
    struct curseg_info *curseg = Segmgr().CURSEG_I(&sbi, i);
    if (curseg->segno == segno) {
      sum = curseg->sum_blk->entries[blkoff];
      break;
    }
  }
  if (i > CURSEG_COLD_DATA) {
    /* Not an active segment; read the on-disk summary block instead. */
    Page *sum_page = Segmgr().GetSumPage(segno);
    struct f2fs_summary_block *sum_node;
    kaddr = PageAddress(sum_page);
    sum_node = static_cast<struct f2fs_summary_block *>(kaddr);
    sum = sum_node->entries[blkoff];
    F2fsPutPage(sum_page, 1);
  }
  /* Get the node page */
  err = Nodemgr().GetNodePage(LeToCpu(sum.nid), &node_page);
  if (err) {
    /* This failure check must not be debug-only: on error |node_page| stays
     * null and would be dereferenced below. */
#ifdef F2FS_BU_DEBUG
    std::cout << "F2fs::CheckIndexInPrevNodes, GetNodePage Error!!!" << std::endl;
#endif
    return;
  }
  bidx = StartBidxOfNode(Nodemgr().OfsOfNode(node_page)) + LeToCpu(sum.ofs_in_node);
  ino = Nodemgr().InoOfNode(node_page);
  F2fsPutPage(node_page, 1);
  /* Deallocate previous index in the node page */
#if 0 // porting needed
  // vnode = F2fsIgetNowait(ino);
#else
  /* Guard the lookup: dereferencing a null vnode on Vget() failure would
   * crash recovery. */
  if (VnodeF2fs::Vget(this, ino, &vnode_refptr) != ZX_OK)
    return;
  vnode = vnode_refptr.get();
#endif
  vnode->TruncateHole(bidx, bidx + 1);
  Iput(vnode);
}
// Replays the data-block assignments recorded in the fsynced node page
// |page| (located at |blkaddr|) into |vnode|'s live dnode. For every index
// whose fsynced block address differs from the current on-disk one, the
// previous owner of that block is punched out, segment summaries are
// updated, and the data page is recovered; finally the node page itself is
// rewritten in place to match the fsynced copy.
void F2fs::DoRecoverData(VnodeF2fs *vnode, Page *page, block_t blkaddr) {
  unsigned int start, end;
  struct dnode_of_data dn;
  struct f2fs_summary sum;
  struct node_info ni;
  // [start, end) is the range of file block indices this node page covers.
  start = StartBidxOfNode(Nodemgr().OfsOfNode(page));
  if (IS_INODE(page)) {
    end = start + ADDRS_PER_INODE;
  } else {
    end = start + ADDRS_PER_BLOCK;
  }
  SetNewDnode(&dn, vnode, nullptr, nullptr, 0);
  if (Nodemgr().GetDnodeOfData(&dn, start, 0))
    return;
  WaitOnPageWriteback(dn.node_page);
  Nodemgr().GetNodeInfo(dn.nid, &ni);
  // The live dnode must describe the same inode and node offset as the
  // fsynced page being replayed.
  ZX_ASSERT(ni.ino == Nodemgr().InoOfNode(page));
  ZX_ASSERT(Nodemgr().OfsOfNode(dn.node_page) == Nodemgr().OfsOfNode(page));
  for (; start < end; start++) {
    block_t src, dest;
    // |src| is the current on-disk address, |dest| the fsynced address.
    src = datablock_addr(dn.node_page, dn.ofs_in_node);
    dest = datablock_addr(page, dn.ofs_in_node);
    if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
      if (src == NULL_ADDR) {
        int err = vnode->ReserveNewBlock(&dn);
        /* We should not get -ENOSPC */
        ZX_ASSERT(!err);
      }
      /* Check the previous node page having this index */
      CheckIndexInPrevNodes(dest);
      Segmgr().SetSummary(&sum, dn.nid, dn.ofs_in_node, ni.version);
      /* write dummy data page */
      Segmgr().RecoverDataPage(nullptr, &sum, src, dest);
      vnode->UpdateExtentCache(dest, &dn);
    }
    dn.ofs_in_node++;
  }
  /* write node page in place */
  Segmgr().SetSummary(&sum, dn.nid, 0, 0);
  if (IS_INODE(dn.node_page))
    Nodemgr().SyncInodePage(&dn);
  Nodemgr().CopyNodeFooter(dn.node_page, page);
  Nodemgr().FillNodeFooter(dn.node_page, dn.nid, ni.ino, Nodemgr().OfsOfNode(page), false);
#if 0 // porting needed
  // set_page_dirty(dn.node_page, this);
#else
  FlushDirtyNodePage(this, dn.node_page);
#endif
  Nodemgr().RecoverNodePage(dn.node_page, &sum, &ni, blkaddr);
  F2fsPutDnode(&dn);
}
// Replays data for every inode tracked on |head| by walking the node chain
// of the |type| current segment (the same chain FindFsyncDnodes() scanned).
// Each node page written under the live checkpoint version is applied with
// DoRecoverData(); once the node page matching an entry's last recorded
// blkaddr has been replayed, that entry is released and removed from the
// list. Afterwards fresh segments are allocated so new writes do not reuse
// the replayed log area.
void F2fs::RecoverData(list_node_t *head, int type) {
  f2fs_sb_info &sbi = SbInfo();
  uint64_t cp_ver = LeToCpu(sbi.ckpt->checkpoint_ver);
  struct curseg_info *curseg;
  Page *page;
  block_t blkaddr;
  /* get node pages in the current segment */
  curseg = SegMgr::CURSEG_I(&sbi, type);
  blkaddr = NEXT_FREE_BLKADDR(&sbi, curseg);
  /* read node page */
  page = GrabCachePage(nullptr, F2FS_NODE_INO(sbi_), blkaddr);
  if (page == nullptr)
    return;
#if 0 // porting needed
  // lock_page(page);
#endif
  while (true) {
    struct fsync_inode_entry *entry;
    if (VnodeF2fs::Readpage(this, page, blkaddr, kReadSync))
      goto out;
    // Stop at the first block written under an older checkpoint.
    if (cp_ver != Nodemgr().CpverOfNode(page))
      goto out;
    entry = GetFsyncInode(head, Nodemgr().InoOfNode(page));
    if (!entry)
      goto next;
    DoRecoverData(entry->vnode, page, blkaddr);
    // Reaching the entry's last fsync dnode means this inode is done.
    if (entry->blkaddr == blkaddr) {
      Iput(entry->vnode);
      list_delete(&entry->list);
#if 0 // porting needed
      // kmem_cache_free(fsync_entry_slab, entry);
#endif
      delete entry;
    }
  next:
    /* check next segment */
    blkaddr = NodeMgr::NextBlkaddrOfNode(page);
    ClearPageUptodate(page);
  }
out:
#if 0 // porting needed
  // unlock_page(page);
  //__free_pages(page, 0);
#endif
  F2fsPutPage(page, 1);
  Segmgr().AllocateNewSegments();
}
void F2fs::RecoverFsyncData() {
f2fs_sb_info &sbi = SbInfo();
list_node_t inode_list;
#if 0 // porting needed
// fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
// sizeof(struct fsync_inode_entry), NULL);
// if (unlikely(!fsync_entry_slab))
// return;
#endif
list_initialize(&inode_list);
/* step #1: find fsynced inode numbers */
if (FindFsyncDnodes(&inode_list)) {
goto out;
}
if (list_is_empty(&inode_list)) {
goto out;
}
/* step #2: recover data */
sbi.por_doing = 1;
RecoverData(&inode_list, CURSEG_WARM_NODE);
sbi.por_doing = 0;
ZX_ASSERT(list_is_empty(&inode_list));
out:
DestroyFsyncDnodes(&inode_list);
#if 0 // porting needed
// kmem_cache_destroy(fsync_entry_slab);
#endif
WriteCheckpoint(false, false);
}
} // namespace f2fs