| // Copyright 2021 The Fuchsia Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include <dirent.h> |
| #include <stdint.h> |
| #include <string.h> |
| |
| #include "f2fs.h" |
| |
| namespace f2fs { |
| |
// Constructs a vnode bound to |fs| with no inode number assigned yet.
VnodeF2fs::VnodeF2fs(F2fs *fs) : Vnode(fs) {}
| |
// Constructs a vnode bound to |fs| for inode number |ino|.
VnodeF2fs::VnodeF2fs(F2fs *fs, ino_t ino) : Vnode(fs), ino_(ino) {}
| |
| fs::VnodeProtocolSet VnodeF2fs::GetProtocols() const { |
| if (IsDir()) { |
| return fs::VnodeProtocol::kDirectory; |
| } else { |
| return fs::VnodeProtocol::kFile; |
| } |
| } |
| |
| zx_status_t VnodeF2fs::GetNodeInfoForProtocol([[maybe_unused]] fs::VnodeProtocol protocol, |
| [[maybe_unused]] fs::Rights rights, |
| fs::VnodeRepresentation *info) { |
| if (IsDir()) { |
| *info = fs::VnodeRepresentation::Directory(); |
| } else { |
| *info = fs::VnodeRepresentation::File(); |
| } |
| return ZX_OK; |
| } |
| |
| void VnodeF2fs::Allocate(F2fs *fs, ino_t ino, uint32_t mode, fbl::RefPtr<VnodeF2fs> *out) { |
| // Check if ino is within scope |
| fs->CheckNidRange(ino); |
| if (S_ISDIR(mode)) { |
| *out = fbl::MakeRefCounted<Dir>(fs, ino); |
| } else { |
| *out = fbl::MakeRefCounted<File>(fs, ino); |
| } |
| (*out)->Init(); |
| } |
| |
// Materializes a vnode for on-disk inode |ino|: reads the inode's node page
// and copies its raw (little-endian) fields into a newly constructed vnode.
// On failure to read the node page, *out is left unmodified.
void VnodeF2fs::Create(F2fs *fs, ino_t ino, fbl::RefPtr<VnodeF2fs> *out) {
  Page *node_page = nullptr;

  // The node/meta "inodes" are internal bookkeeping vnodes with no regular
  // on-disk inode block to parse, so a bare VnodeF2fs suffices for them.
  if (ino == NodeIno(&fs->GetSbInfo()) || ino == MetaIno(&fs->GetSbInfo())) {
    *out = fbl::MakeRefCounted<VnodeF2fs>(fs, ino);
    return;
  }

  /* Check if ino is within scope */
  fs->CheckNidRange(ino);

  if (fs->Nodemgr().GetNodePage(ino, &node_page) != ZX_OK) {
    return;
  }

  Node *rn = static_cast<Node *>(PageAddress(node_page));
  Inode *ri = &(rn->i);

  // Choose the concrete vnode type from the on-disk mode bits.
  // NOTE(review): i_mode is tested here without LeToCpu but converted via
  // LeToCpu below — harmless on little-endian targets; confirm for others.
  if (S_ISDIR(ri->i_mode)) {
    *out = fbl::MakeRefCounted<Dir>(fs, ino);
  } else {
    *out = fbl::MakeRefCounted<File>(fs, ino);
  }

  VnodeF2fs *vnode = out->get();

  // Copy every raw inode field into the cached in-memory state, converting
  // from on-disk little-endian as we go.
  vnode->Init();
  vnode->SetMode(LeToCpu(ri->i_mode));
  vnode->SetUid(LeToCpu(ri->i_uid));
  vnode->SetGid(LeToCpu(ri->i_gid));
  vnode->SetNlink(ri->i_links);
  vnode->SetSize(LeToCpu(ri->i_size));
  vnode->SetBlocks(LeToCpu(ri->i_blocks));
  vnode->SetATime(LeToCpu(ri->i_atime), LeToCpu(ri->i_atime_nsec));
  vnode->SetCTime(LeToCpu(ri->i_ctime), LeToCpu(ri->i_ctime_nsec));
  vnode->SetMTime(LeToCpu(ri->i_mtime), LeToCpu(ri->i_mtime_nsec));
  vnode->SetGeneration(LeToCpu(ri->i_generation));
  vnode->SetParentNid(LeToCpu(ri->i_pino));
  vnode->SetCurDirDepth(LeToCpu(ri->i_current_depth));
  vnode->SetXattrNid(LeToCpu(ri->i_xattr_nid));
  vnode->SetInodeFlags(LeToCpu(ri->i_flags));
  vnode->fi_.flags = 0;
#if 0  // porting needed
  // vnode->fi.data_version = LeToCpu(GetCheckpoint(sbi)->checkpoint_ver) - 1;
#endif
  vnode->SetAdvise(ri->i_advise);
  vnode->GetExtentInfo(ri->i_ext);
  vnode->SetName(std::string_view(reinterpret_cast<char *>(ri->i_name), ri->i_namelen));

  // Mirror the on-disk inline-dentry flag into the in-memory flag set.
  if (ri->i_inline & kInlineDentry)
    vnode->SetInodeFlag(InodeInfoFlag::kInlineDentry);
  else
    vnode->ClearInodeFlag(InodeInfoFlag::kInlineDentry);

  // Release the (locked) node page now that all fields are copied out.
  F2fsPutPage(node_page, 1);
}
| |
| zx_status_t VnodeF2fs::OpenNode([[maybe_unused]] ValidatedOptions options, |
| fbl::RefPtr<Vnode> *out_redirect) { |
| std::lock_guard lock(mutex_); |
| fd_count_++; |
| return ZX_OK; |
| } |
| |
| zx_status_t VnodeF2fs::CloseNode() { |
| std::lock_guard lock(mutex_); |
| fd_count_--; |
| return ZX_OK; |
| } |
| |
// Called when the last reference to this vnode is released.
void VnodeF2fs::RecycleNode() {
  // TODO:
  // implement the desired caching behavior
  // currently, it remove its entry from vnode table and delete
  Vfs()->EraseVnodeFromTable(this);
  // Self-deletion: by the recycle contract no other holder references |this|
  // once we get here, so destroying the object is the last action taken.
  delete this;
}
| |
// Fills |a| with attributes derived from this vnode's cached inode fields.
zx_status_t VnodeF2fs::GetAttributes(fs::VnodeAttributes *a) {
#ifdef F2FS_BU_DEBUG
  FX_LOGS(DEBUG) << "f2fs_getattr() vn=" << this << "(#" << ino_ << ")";
#endif
  *a = fs::VnodeAttributes();

  a->mode = GetMode();
  a->inode = Ino();
  a->content_size = GetSize();
  a->storage_size = GetBlockCount() * kBlockSize;  // allocated bytes on disk
  a->link_count = GetNlink();
  // creation_time is populated from ctime_ (no separate birth time is kept
  // here). Each stamp is seconds promoted to ns plus the stored ns remainder.
  a->creation_time = zx_time_add_duration(ZX_SEC(ctime_.tv_sec), ctime_.tv_nsec);
  a->modification_time = zx_time_add_duration(ZX_SEC(mtime_.tv_sec), mtime_.tv_nsec);

  return ZX_OK;
}
| |
// Applies the creation/modification time updates carried in |attr|; any other
// requested attribute is unsupported and yields ZX_ERR_INVALID_ARGS.
// NOTE(review): the time updates taken before the any() check below have
// already been applied to the in-memory inode (though not persisted) when the
// error is returned — confirm this partial application is acceptable.
zx_status_t VnodeF2fs::SetAttributes(fs::VnodeAttributesUpdate attr) {
  bool need_inode_sync = false;

  if (attr.has_creation_time()) {
    SetCTime(zx_timespec_from_duration(attr.take_creation_time()));
    need_inode_sync = true;
  }
  if (attr.has_modification_time()) {
    SetMTime(zx_timespec_from_duration(attr.take_modification_time()));
    need_inode_sync = true;
  }
  // take_*() above consumed the handled fields; anything left is unsupported.
  if (attr.any()) {
    return ZX_ERR_INVALID_ARGS;
  }

  if (need_inode_sync) {
    MarkInodeDirty();
  }

  return ZX_OK;
}
| |
// Argument pack for the not-yet-ported iget lookup callbacks below
// (see the disabled F2fsIgetTest / F2fsIgetNowait).
struct f2fs_iget_args {
  uint64_t ino;  // inode number being looked up
  int on_free;   // set by the callback when the inode is being freed
};
| |
| #if 0 // porting needed |
| // void VnodeF2fs::F2fsSetInodeFlags() { |
| // uint64_t &flags = fi.i_flags; |
| |
| // inode_.i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | |
| // S_NOATIME | S_DIRSYNC); |
| |
| // if (flags & FS_SYNC_FL) |
| // inode_.i_flags |= S_SYNC; |
| // if (flags & FS_APPEND_FL) |
| // inode_.i_flags |= S_APPEND; |
| // if (flags & FS_IMMUTABLE_FL) |
| // inode_.i_flags |= S_IMMUTABLE; |
| // if (flags & FS_NOATIME_FL) |
| // inode_.i_flags |= S_NOATIME; |
| // if (flags & FS_DIRSYNC_FL) |
| // inode_.i_flags |= S_DIRSYNC; |
| // } |
| |
| // int VnodeF2fs::F2fsIgetTest(void *data) { |
| // f2fs_iget_args *args = (f2fs_iget_args *)data; |
| |
| // if (ino_ != args->ino) |
| // return 0; |
| // if (i_state & (I_FREEING | I_WILL_FREE)) { |
| // args->on_free = 1; |
| // return 0; |
| // } |
| // return 1; |
| // } |
| |
| // VnodeF2fs *VnodeF2fs::F2fsIgetNowait(uint64_t ino) { |
| // fbl::RefPtr<VnodeF2fs> vnode_refptr; |
| // VnodeF2fs *vnode = nullptr; |
| // f2fs_iget_args args = {.ino = ino, .on_free = 0}; |
| // vnode = ilookup5(sb, ino, F2fsIgetTest, &args); |
| |
| // if (vnode) |
| // return vnode; |
| // if (!args.on_free) { |
| // Vget(Vfs(), ino, &vnode_refptr); |
| // vnode = vnode_refptr.get(); |
| // return vnode; |
| // } |
| // return static_cast<VnodeF2fs *>(ErrPtr(ZX_ERR_NOT_FOUND)); |
| // } |
| #endif |
| |
// Looks up (or lazily creates) the vnode for inode |ino|.
// Returns ZX_ERR_NO_MEMORY when Create() produced nothing, and
// ZX_ERR_NOT_FOUND for an orphaned inode (nlink == 0) outside of recovery.
zx_status_t VnodeF2fs::Vget(F2fs *fs, uint64_t ino, fbl::RefPtr<VnodeF2fs> *out) {
  fbl::RefPtr<VnodeF2fs> vnode_refptr;
  VnodeF2fs *vnode;

  // Fast path: the vnode is already cached in the vnode table.
  if (fs->FindVnode(&vnode_refptr, ino) == ZX_OK) {
    *out = std::move(vnode_refptr);
    return ZX_OK;
  }

  Create(fs, ino, &vnode_refptr);

  vnode = vnode_refptr.get();

  if (vnode == nullptr) {
    return ZX_ERR_NO_MEMORY;
  }

  fs->InsertVnode(vnode);

#if 0 // porting needed
  // if (!(vnode->i_state & I_NEW))
  //   return vnode;
#endif

  // Orphan check (internal node/meta inodes are exempt): a zero-link inode is
  // only legitimate while power-on recovery (por_doing) is replaying.
  // NOTE(review): on this error path the vnode has already been inserted into
  // the table above and is not removed — confirm whether that is intended.
  if (!(ino == NodeIno(&fs->GetSbInfo()) || ino == MetaIno(&fs->GetSbInfo()))) {
    if (!fs->GetSbInfo().por_doing && vnode->GetNlink() == 0) {
#if 0 // porting needed
      // iget_failed(inode);
#endif
      return ZX_ERR_NOT_FOUND;
    }
  }

#if 0 // porting needed
  // if (ino == NodeIno(sbi)) {
  //   // inode->i_mapping->a_ops = &Node_aops; //invalidatepage, releasepage
  //   // mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
  // } else if (ino == MetaIno(sbi)) {
  //   // inode->i_mapping->a_ops = &f2fs_meta_aops; //empty
  //   // mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
  // } else if (S_ISREG(inode->i_mode)) {
  //   // inode->i_op = &f2fs_file_inode_operations; //empty
  //   // inode->i_fop = &f2fs_file_operations; //empty
  //   // inode->i_mapping->a_ops = &f2fs_dblock_aops;
  // } else if (S_ISDIR(inode->i_mode)) {
  //   // inode->i_op = &f2fs_dir_inode_operations; //lookup only
  //   // inode->i_fop = &f2fs_dir_operations; //read, readdir
  //   // inode->i_mapping->a_ops = &f2fs_dblock_aops; //readpage, readpages, invalidatepage,
  //   releasepage
  //   // mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER_MOVABLE |
  //   //                      __GFP_ZERO);
  // } else if (S_ISLNK(inode->i_mode)) {
  //   // inode->i_op = &f2fs_symlink_inode_operations; //empty
  //   // inode->i_mapping->a_ops = &f2fs_dblock_aops;
  // } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
  //            S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
  //   // inode->i_op = &f2fs_special_inode_operations; //empty
  //   init_special_inode(inode, inode->i_mode, inode->i_rdev);
  // } else {
  //   iget_failed(inode);
  //   return ZX_ERR_IO;
  // }
#endif

  *out = std::move(vnode_refptr);

  return ZX_OK;
}
| |
| void VnodeF2fs::UpdateInode(Page *node_page) { |
| Node *rn; |
| Inode *ri; |
| |
| WaitOnPageWriteback(node_page); |
| |
| rn = static_cast<Node *>(PageAddress(node_page)); |
| ri = &(rn->i); |
| |
| ri->i_mode = CpuToLe(GetMode()); |
| ri->i_advise = GetAdvise(); |
| ri->i_uid = CpuToLe(GetUid()); |
| ri->i_gid = CpuToLe(GetGid()); |
| ri->i_links = CpuToLe(GetNlink()); |
| ri->i_size = CpuToLe(GetSize()); |
| ri->i_blocks = CpuToLe(GetBlocks()); |
| SetRawExtent(ri->i_ext); |
| |
| ri->i_atime = CpuToLe(static_cast<uint64_t>(atime_.tv_sec)); |
| ri->i_ctime = CpuToLe(static_cast<uint64_t>(ctime_.tv_sec)); |
| ri->i_mtime = CpuToLe(static_cast<uint64_t>(mtime_.tv_sec)); |
| ri->i_atime_nsec = CpuToLe(static_cast<uint32_t>(atime_.tv_nsec)); |
| ri->i_ctime_nsec = CpuToLe(static_cast<uint32_t>(ctime_.tv_nsec)); |
| ri->i_mtime_nsec = CpuToLe(static_cast<uint64_t>(mtime_.tv_nsec)); |
| ri->i_current_depth = CpuToLe(GetCurDirDepth()); |
| ri->i_xattr_nid = CpuToLe(GetXattrNid()); |
| ri->i_flags = CpuToLe(GetInodeFlags()); |
| ri->i_generation = CpuToLe(GetGeneration()); |
| |
| ri->i_namelen = CpuToLe(GetNameLen()); |
| memcpy(ri->i_name, GetName(), GetNameLen()); |
| |
| if (IsInodeFlagSet(InodeInfoFlag::kInlineDentry)) |
| ri->i_inline |= kInlineDentry; |
| else |
| ri->i_inline &= ~kInlineDentry; |
| |
| #if 0 // porting needed |
| // set_page_dirty(node_page); |
| #else |
| FlushDirtyNodePage(Vfs(), node_page); |
| #endif |
| } |
| |
// Persists this vnode's inode to its node page. |wbc| is accepted for
// interface parity but is currently unused. Internal node/meta inodes are
// skipped. Annotated TA_NO_THREAD_SAFETY_ANALYSIS because sbi.write_inode is
// acquired only on the non-dirty branch below.
int VnodeF2fs::WriteInode(WritebackControl *wbc) TA_NO_THREAD_SAFETY_ANALYSIS {
  SbInfo &sbi = Vfs()->GetSbInfo();
  Page *node_page = nullptr;
  zx_status_t ret = ZX_OK;

  // The node/meta bookkeeping inodes have no regular inode block to write.
  if (ino_ == NodeIno(&sbi) || ino_ == MetaIno(&sbi))
    return ret;

  if (ret = Vfs()->Nodemgr().GetNodePage(ino_, &node_page); ret != ZX_OK)
    return ret;

  if (PageDirty(node_page)) {
    // Page is already dirty: update it in place and release.
    UpdateInode(node_page);
    F2fsPutPage(node_page, 1);
  } else {
    // Release and re-fetch the page under the write_inode lock before
    // updating. NOTE(review): presumably this serializes with concurrent
    // inode writers on the clean-page path — confirm the locking intent.
    F2fsPutPage(node_page, 1);
    fbl::AutoLock lock(&sbi.write_inode);
    if (ret = Vfs()->Nodemgr().GetNodePage(ino_, &node_page); ret != ZX_OK)
      return ret;
    UpdateInode(node_page);
    F2fsPutPage(node_page, 1);
  }

  return ZX_OK;
}
| |
| zx_status_t VnodeF2fs::DoTruncate(size_t len) { |
| zx_status_t ret; |
| |
| if (ret = TruncateBlocks(len); ret == ZX_OK) { |
| SetSize(len); |
| timespec cur_time; |
| clock_gettime(CLOCK_REALTIME, &cur_time); |
| SetCTime(cur_time); |
| SetMTime(cur_time); |
| MarkInodeDirty(); |
| } |
| |
| Vfs()->Segmgr().BalanceFs(); |
| return ret; |
| } |
| |
// Frees up to |count| data block addresses starting at dn->ofs_in_node within
// the node block referenced by |dn|. Returns the number of blocks actually
// freed (holes are skipped).
int VnodeF2fs::TruncateDataBlocksRange(DnodeOfData *dn, int count) {
  int nr_free = 0, ofs = dn->ofs_in_node;
  Node *raw_node;
  uint32_t *addr;

  raw_node = static_cast<Node *>(PageAddress(dn->node_page));
  addr = BlkaddrInNode(raw_node) + ofs;

  for (; count > 0; count--, addr++, dn->ofs_in_node++) {
    block_t blkaddr = LeToCpu(*addr);
    if (blkaddr == kNullAddr)
      continue;  // hole: nothing allocated at this offset

    // Drop the cached extent, return the block to the segment manager, and
    // decrement the inode's valid-block accounting.
    UpdateExtentCache(kNullAddr, dn);
    Vfs()->Segmgr().InvalidateBlocks(blkaddr);
    Vfs()->DecValidBlockCount(dn->vnode, 1);
    nr_free++;
  }
  if (nr_free) {
#if 0 // porting needed
    // set_page_dirty(dn->node_page);
#else
    FlushDirtyNodePage(Vfs(), dn->node_page);
#endif
    Vfs()->Nodemgr().SyncInodePage(dn);
  }
  // Restore the caller's offset; the loop above advanced dn->ofs_in_node.
  dn->ofs_in_node = ofs;
  return nr_free;
}
| |
// Frees every data block address held by |dn|'s node block.
void VnodeF2fs::TruncateDataBlocks(DnodeOfData *dn) { TruncateDataBlocksRange(dn, kAddrsPerBlock); }
| |
// Zeroes the tail of the data page containing byte offset |from| so stale
// bytes past the new EOF cannot be read back. No-op when |from| is
// page-aligned or the page is not present.
void VnodeF2fs::TruncatePartialDataPage(uint64_t from) {
  size_t offset = from & (kPageCacheSize - 1);  // byte offset within its page
  Page *page = nullptr;

  if (!offset)
    return;

  if (FindDataPage(from >> kPageCacheShift, &page) != ZX_OK)
    return;

#if 0 // porting needed
  // lock_page(page);
#endif
  WaitOnPageWriteback(page);
  // Clear everything from |offset| through the end of the page.
  zero_user(page, offset, kPageCacheSize - offset);
#if 0 // porting needed
  // set_page_dirty(page);
#else
  FlushDirtyDataPage(Vfs(), page);
#endif
  F2fsPutPage(page, 1);
}
| |
// Releases all data blocks at and beyond byte offset |from|, then zeroes the
// partial tail page. Block release runs under the kDataTrunc filesystem lock.
zx_status_t VnodeF2fs::TruncateBlocks(uint64_t from) {
  SbInfo &sbi = Vfs()->GetSbInfo();
  unsigned int blocksize = sbi.blocksize;
  DnodeOfData dn;
  int count = 0;
  zx_status_t err;

  if (from > GetSize())
    return ZX_OK;  // nothing beyond the current EOF to release

  // First block index wholly at-or-after |from| (round up to a block).
  pgoff_t free_from = static_cast<pgoff_t>((from + blocksize - 1) >> (sbi.log_blocksize));

  mutex_lock_op(&sbi, LockType::kDataTrunc);

  // Single-pass do { } while (false): lets the NOT_FOUND case break out to
  // the indirect-block truncation below without a goto.
  do {
    SetNewDnode(&dn, this, nullptr, nullptr, 0);
    err = Vfs()->Nodemgr().GetDnodeOfData(&dn, free_from, kRdOnlyNode);
    if (err) {
      if (err == ZX_ERR_NOT_FOUND)
        break;  // no dnode maps free_from; direct blocks need no trimming
      mutex_unlock_op(&sbi, LockType::kDataTrunc);
      return err;
    }

    // Number of data slots in the node block holding free_from.
    if (IsInode(dn.node_page))
      count = kAddrsPerInode;
    else
      count = kAddrsPerBlock;

    count -= dn.ofs_in_node;
    ZX_ASSERT(count >= 0);
    // Free the remainder of this node block's addresses.
    if (dn.ofs_in_node || IsInode(dn.node_page)) {
      TruncateDataBlocksRange(&dn, count);
      free_from += count;
    }

    F2fsPutDnode(&dn);
  } while (false);

  // Free everything addressed through indirect node blocks from free_from on.
  err = Vfs()->Nodemgr().TruncateInodeBlocks(this, free_from);
  mutex_unlock_op(&sbi, LockType::kDataTrunc);

  /* lastly zero out the first data page */
  TruncatePartialDataPage(from);

  return err;
}
| |
| zx_status_t VnodeF2fs::TruncateHole(pgoff_t pg_start, pgoff_t pg_end) { |
| pgoff_t index; |
| |
| for (index = pg_start; index < pg_end; index++) { |
| DnodeOfData dn; |
| SbInfo &sbi = Vfs()->GetSbInfo(); |
| |
| fbl::AutoLock lock(&sbi.fs_lock[static_cast<int>(LockType::kDataTrunc)]); |
| SetNewDnode(&dn, this, NULL, NULL, 0); |
| if (zx_status_t err = Vfs()->Nodemgr().GetDnodeOfData(&dn, index, kRdOnlyNode); err != ZX_OK) { |
| if (err == ZX_ERR_NOT_FOUND) |
| continue; |
| return err; |
| } |
| |
| if (dn.data_blkaddr != kNullAddr) |
| TruncateDataBlocksRange(&dn, 1); |
| F2fsPutDnode(&dn); |
| } |
| return ZX_OK; |
| } |
| |
| /** |
| * Called at the last Iput() if i_nlink is zero |
| */ |
| #if 0 // porting needed |
| // void VnodeF2fs::F2fsEvictInode() { |
| // SbInfo &sbi = Vfs()->GetSbInfo(); |
| |
| // // truncate_inode_pages(&inode->i_data, 0); |
| |
| // if (ino_ == NodeIno(&sbi) || ino_ == MetaIno(&sbi)) |
| // goto no_delete; |
| |
| // // BUG_ON(AtomicRead(&fi.->dirty_dents)); |
| // // remove_dirty_dir_inode(this); |
| |
| // if (i_nlink || IsBadInode(this)) |
| // goto no_delete; |
| |
| // SetInodeFlag(&fi, InodeInfoFlag::kNoAlloc); |
| // i_size = 0; |
| |
| // if (HasBlocks(this)) |
| // F2fsTruncate(); |
| |
| // // remove_inode_page(inode); |
| // no_delete: |
| // ClearInode(this); |
| // } |
| #endif |
| |
// One-time setup shared by all vnode construction paths: zeroes the f2fs
// inode info, resets dirty-dentry accounting, and marks the vnode as new.
void VnodeF2fs::Init() {
  mtx_init(&i_mutex_, mtx_plain);
  memset(&fi_, 0, sizeof(InodeInfo));
#if 0 // porting needed
  // AtomicSet(&vnode->fi.vfs_inode.i_version, 1);
#endif
  AtomicSet(&fi_.dirty_dents, 0);
  SetCurDirDepth(1);  // directory dentry-hash depth starts at 1
  RwlockInit(&fi_.ext.ext_lock);
  SetInodeFlag(InodeInfoFlag::kNewInode);
}
| |
// Currently writes the inode out synchronously instead of merely marking it
// dirty (no dirty-inode cache exists yet — see the TODOs in SyncFile).
void VnodeF2fs::MarkInodeDirty() { WriteInode(nullptr); }
| |
| void VnodeF2fs::Sync(SyncCallback closure) { |
| SyncFile(0, GetSize(), 0); |
| closure(ZX_OK); |
| } |
| |
// Reports filesystem-level statistics (sizes, node counts, name) for
// fuchsia.io filesystem queries.
zx_status_t VnodeF2fs::QueryFilesystem(fuchsia_io::wire::FilesystemInfo *info) {
  SbInfo &sbi = Vfs()->GetSbInfo();
  *info = {};
  info->block_size = kBlockSize;
  info->max_filename_size = kMaxNameLen;
  info->fs_type = VFS_TYPE_F2FS;
  info->fs_id = Vfs()->FsId();
  info->total_bytes = sbi.user_block_count * kBlockSize;
  info->used_bytes = ValidUserBlocks(&sbi) * kBlockSize;
  info->total_nodes = sbi.total_node_count;
  info->used_nodes = sbi.total_valid_inode_count;

  constexpr std::string_view kFsName = "f2fs";
  static_assert(kFsName.size() + 1 < fuchsia_io::wire::kMaxFsNameBuffer, "f2fs name too long");
  // copy() returns the number of characters written, which is exactly where
  // the NUL terminator belongs.
  info->name[kFsName.copy(reinterpret_cast<char *>(info->name.data()),
                          fuchsia_io::wire::kMaxFsNameBuffer - 1)] = '\0';

  // TODO(unknown): Fill info->free_shared_pool_bytes using fvm info

  return ZX_OK;
}
| |
// fsync entry point: makes this vnode durable. Chooses between a full
// checkpoint (need_cp) and a cheaper roll-forward fsync record depending on
// file type, link count, mount options, and remaining log space.
// |start|, |end|, and |datasync| are currently unused; the range write-back
// and datasync fast paths are not ported yet (see the #if 0 blocks).
zx_status_t VnodeF2fs::SyncFile(loff_t start, loff_t end, int datasync) {
  SbInfo &sbi = Vfs()->GetSbInfo();
  uint64_t cur_version;
  zx_status_t ret = 0;
  bool need_cp = false;
#if 0 // porting needed
  // WritebackControl wbc;

  // wbc.sync_mode = WB_SYNC_ALL;
  // wbc.nr_to_write = LONG_MAX;
  // wbc.for_reclaim = 0;

  // ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
  // if (ret)
  //   return ret;
#endif

  fbl::AutoLock lock(&i_mutex_);

#if 0 // porting needed
  // if (inode->i_sb->s_flags & MS_RDONLY)
  //   goto out;
  // if (datasync && !(i_state & I_DIRTY_DATASYNC)) {
  //   goto out;
  //}
#endif

  {
    fbl::AutoLock cplock(&sbi.cp_mutex);
    // cur_version only feeds the disabled data_version check below.
    cur_version = LeToCpu(GetCheckpoint(&sbi)->checkpoint_ver);
  }

#if 0 // porting needed
  // if (fi.data_version != cur_version &&
  //     !(i_state & I_DIRTY)) {
  //   goto out;
  //}
#endif
  // NOTE(review): this decrement appears to stand in for the disabled
  // data_version tracking above — confirm intent before relying on it.
  fi_.data_version--;

  // A checkpoint is required whenever roll-forward recovery could not restore
  // this file on its own: non-regular files or multiple hard links, an
  // explicit kNeedCp mark, insufficient space for roll-forward records, the
  // roll-forward mount option disabled, or a non-checkpointed parent dir.
  if (!IsReg() || GetNlink() != 1)
    need_cp = true;
  if (IsInodeFlagSet(InodeInfoFlag::kNeedCp))
    need_cp = true;
  if (!Vfs()->SpaceForRollForward())
    need_cp = true;
  if (TestOpt(&sbi, kMountDisableRollForward) || NeedToSyncDir())
    need_cp = true;

  // TODO: it intended to update cached page for this, but
  // the current impl. writes out inode in a sync manner
  // WriteInode(nullptr);

  if (need_cp) {
    // TODO: remove it after implementing cache
    WriteInode(nullptr);
    // all the dirty node pages should be flushed for POR
    ret = Vfs()->SyncFs(1);
    ClearInodeFlag(InodeInfoFlag::kNeedCp);
  } else {
    // Roll-forward path: stamp the node page with fsync/dentry marks so
    // recovery can replay it, then write the inode back.
    // TODO: it intended to flush all dirty node pages on cache
    // remove it after cache
    Page *node_page = nullptr;
    int mark = !Vfs()->Nodemgr().IsCheckpointedNode(Ino());
    if (ret = Vfs()->Nodemgr().GetNodePage(Ino(), &node_page); ret != ZX_OK) {
      return ret;
    }

    Vfs()->Nodemgr().SetFsyncMark(node_page, 1);
    Vfs()->Nodemgr().SetDentryMark(node_page, mark);

    UpdateInode(node_page);
    F2fsPutPage(node_page, 1);

#if 0 // porting needed
    // while (Vfs()->Nodemgr().SyncNodePages(Ino(), &wbc) == 0)
    //   WriteInode(nullptr);
    // filemap_fdatawait_range(nullptr,//sbi->node_inode->i_mapping,
    //                         0, LONG_MAX);
#endif
  }
#if 0 // porting needed
  // out:
#endif
  return ret;
}
| |
// Returns nonzero when this vnode's parent directory is not covered by the
// last checkpoint, forcing fsync to fall back to a full checkpoint.
int VnodeF2fs::NeedToSyncDir() {
#if 0 // porting needed
  // dentry = d_find_any_alias(vnode);
  // if (!dentry) {
  //   // Iput(inode);
  //   return 0;
  // }
  // pino = dentry->d_parent->d_inode->i_ino;
  // dput(dentry);
  // Iput();
#endif
  // The parent nid must be a valid (non-null) node id before the NAT lookup.
  ZX_ASSERT(GetParentNid() < kNullIno);
  return !Vfs()->Nodemgr().IsCheckpointedNode(GetParentNid());
}
| |
// Forwards a directory-watch event for |name| to registered watchers.
void VnodeF2fs::Notify(std::string_view name, unsigned event) { watcher_.Notify(name, event); }
| |
// Registers |watcher| to receive directory events on this vnode, filtered by
// |mask|; delegates to the shared watcher implementation.
zx_status_t VnodeF2fs::WatchDir(fs::Vfs *vfs, uint32_t mask, uint32_t options,
                                zx::channel watcher) {
  return watcher_.WatchDir(vfs, this, mask, options, std::move(watcher));
}
| |
// Imports the on-disk extent |i_ext| into the in-memory extent cache,
// converting each field from little-endian under the extent write lock.
inline void VnodeF2fs::GetExtentInfo(const Extent &i_ext) {
  WriteLock(&fi_.ext.ext_lock);
  fi_.ext.fofs = LeToCpu(i_ext.fofs);
  fi_.ext.blk_addr = LeToCpu(i_ext.blk_addr);
  fi_.ext.len = LeToCpu(i_ext.len);
  WriteUnlock(&fi_.ext.ext_lock);
}
| |
// Exports the in-memory extent cache into the raw on-disk extent |i_ext|,
// converting to little-endian; a read lock suffices since fi_ is only read.
inline void VnodeF2fs::SetRawExtent(Extent &i_ext) {
  ReadLock(&fi_.ext.ext_lock);
  i_ext.fofs = CpuToLe(fi_.ext.fofs);
  i_ext.blk_addr = CpuToLe(fi_.ext.blk_addr);
  i_ext.len = CpuToLe(fi_.ext.len);
  ReadUnlock(&fi_.ext.ext_lock);
}
| |
| } // namespace f2fs |