blob: b2ba304868b2e06581223613860449c775ebde53 [file] [log] [blame]
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <dirent.h>
#include <stdint.h>
#include <string.h>
#include <sys/stat.h>
#include "f2fs.h"
namespace f2fs {
// Constructs a vnode bound to filesystem |fs|; the inode number keeps its default.
VnodeF2fs::VnodeF2fs(F2fs *fs) : fs_(fs) {}

// Constructs a vnode bound to filesystem |fs| for inode number |ino|.
VnodeF2fs::VnodeF2fs(F2fs *fs, ino_t ino) : fs_(fs), ino_(ino) {}
bool VnodeF2fs::IsDirectory() { return S_ISDIR(i_mode_); }
// Reports which node protocol this vnode serves: directories expose the
// directory protocol, every other inode is exposed as a plain file.
fs::VnodeProtocolSet VnodeF2fs::GetProtocols() const {
  return S_ISDIR(i_mode_) ? fs::VnodeProtocol::kDirectory : fs::VnodeProtocol::kFile;
}
// Fills |info| with the representation matching this vnode's type. The
// requested |protocol| and |rights| are not consulted; the answer depends only
// on whether the inode is a directory. Always succeeds.
zx_status_t VnodeF2fs::GetNodeInfoForProtocol([[maybe_unused]] fs::VnodeProtocol protocol,
                                              [[maybe_unused]] fs::Rights rights,
                                              fs::VnodeRepresentation *info) {
  if (IsDirectory()) {
    *info = fs::VnodeRepresentation::Directory();
    return ZX_OK;
  }
  *info = fs::VnodeRepresentation::File();
  return ZX_OK;
}
// Creates a brand-new in-core vnode for inode |ino| with mode bits |mode| and
// initializes its InodeInfo to the "new inode" state. The concrete class (Dir
// vs. File) is chosen from the directory bit in |mode|.
void VnodeF2fs::Allocate(F2fs *fs, ino_t ino, uint32_t mode, fbl::RefPtr<VnodeF2fs> *out) {
  /* Check if ino is within scope */
  CheckNidRange(&fs->GetSbInfo(), ino);

  if (S_ISDIR(mode)) {
    *out = fbl::MakeRefCounted<Dir>(fs, ino);
  } else {
    *out = fbl::MakeRefCounted<File>(fs, ino);
  }

  VnodeF2fs *vnode = out->get();
  mtx_init(&vnode->i_mutex_, mtx_plain);

  // Zero the whole InodeInfo first, then set the fields that start non-zero.
  memset(&vnode->fi_, 0, sizeof(InodeInfo));
#if 0  // porting needed
  // AtomicSet(&vnode->fi.vfs_inode.i_version, 1);
#endif
  AtomicSet(&vnode->fi_.dirty_dents, 0);
  vnode->fi_.i_current_depth = 1;
  vnode->fi_.i_advise = 0;
  // NOTE(review): ext.ext_lock was just memset above; RwlockInit presumably
  // re-establishes a valid lock state — confirm that is safe for this rwlock type.
  RwlockInit(&vnode->fi_.ext.ext_lock);
  SetInodeFlag(&vnode->fi_, InodeInfoFlag::kNewInode);
}
// TODO(sukka): fill vfs->members in addition to size
// TODO(sukka): if dir/file vnode are defined as different class, check if the ino is for dir/file
//
// Instantiates an in-core vnode for on-disk inode |ino| and populates it from
// the raw inode read out of its node page. On failure to read the node page,
// |*out| is left untouched.
void VnodeF2fs::Create(F2fs *fs, ino_t ino, fbl::RefPtr<VnodeF2fs> *out) {
  Page *node_page = nullptr;
  Inode *ri;
  Node *rn;

  // The node/meta inodes are internal bookkeeping vnodes without a regular
  // on-disk inode, so they get a bare VnodeF2fs and no field population.
  if (ino == NodeIno(&fs->GetSbInfo()) || ino == MetaIno(&fs->GetSbInfo())) {
    *out = fbl::MakeRefCounted<VnodeF2fs>(fs, ino);
    return;
  }

  /* Check if ino is within scope */
  CheckNidRange(&fs->GetSbInfo(), ino);

  if (fs->Nodemgr().GetNodePage(ino, &node_page) != ZX_OK) {
    return;
  }

  rn = static_cast<Node *>(PageAddress(node_page));
  ri = &(rn->i);

  // [sukka] need to check result?
  // Choose the concrete vnode class from the on-disk mode bits.
  if (S_ISDIR(ri->i_mode)) {
    *out = fbl::MakeRefCounted<Dir>(fs, ino);
  } else {
    *out = fbl::MakeRefCounted<File>(fs, ino);
  }

  VnodeF2fs *vnode = out->get();
  mtx_init(&vnode->i_mutex_, mtx_plain);

  // Copy the little-endian raw inode fields into the in-core vnode.
  vnode->i_mode_ = LeToCpu(ri->i_mode);
  vnode->i_uid_ = LeToCpu(ri->i_uid);
  vnode->i_gid_ = LeToCpu(ri->i_gid);
  vnode->i_nlink_ = ri->i_links;
  vnode->i_size_ = LeToCpu(ri->i_size);
  vnode->i_blocks_ = LeToCpu(ri->i_blocks);
  vnode->i_atime_.tv_sec = LeToCpu(ri->i_atime);
  vnode->i_ctime_.tv_sec = LeToCpu(ri->i_ctime);
  vnode->i_mtime_.tv_sec = LeToCpu(ri->i_mtime);
  vnode->i_atime_.tv_nsec = LeToCpu(ri->i_atime_nsec);
  vnode->i_ctime_.tv_nsec = LeToCpu(ri->i_ctime_nsec);
  vnode->i_mtime_.tv_nsec = LeToCpu(ri->i_mtime_nsec);
  vnode->i_generation_ = LeToCpu(ri->i_generation);
  vnode->UpdateParentNid(ri->i_pino);
  vnode->fi_.i_current_depth = LeToCpu(ri->i_current_depth);
  vnode->fi_.i_xattr_nid = LeToCpu(ri->i_xattr_nid);
  vnode->fi_.i_flags = LeToCpu(ri->i_flags);
  vnode->fi_.flags = 0;
#if 0  // porting needed
  // vnode->fi.data_version = LeToCpu(GetCheckpoint(sbi)->checkpoint_ver) - 1;
#endif
  vnode->fi_.i_advise = ri->i_advise;
  RwlockInit(&vnode->fi_.ext.ext_lock);
  GetExtentInfo(&vnode->fi_.ext, ri->i_ext);

  // NOTE(review): i_name_sp_ is a string_view into |node_page|'s buffer, which
  // is released by F2fsPutPage() just below — confirm the name bytes stay
  // valid (or are copied) before anyone reads this view.
  vnode->i_name_sp_ = std::string_view(reinterpret_cast<char *>(ri->i_name), ri->i_namelen);

  F2fsPutPage(node_page, 1);
}
// Records one more open file descriptor referencing this vnode. No redirect
// is ever produced and the validated |options| are not inspected.
zx_status_t VnodeF2fs::Open([[maybe_unused]] ValidatedOptions options,
                            fbl::RefPtr<Vnode> *out_redirect) {
  ++fd_count_;
  return ZX_OK;
}
// Drops one open-descriptor reference previously added by Open().
zx_status_t VnodeF2fs::Close() {
  --fd_count_;
  return ZX_OK;
}
// Invoked when the last reference to this vnode goes away: unregisters it from
// the filesystem's vnode table and destroys it immediately (no caching yet).
void VnodeF2fs::RecycleNode() {
  // TODO:
  // implement the desired caching behavior
  // currently, it remove its entry from vnode table and delete
  fs_->EraseVnodeFromTable(this);
  delete this;
}
// Reports the vnode's cached attributes (mode, sizes, link count, timestamps)
// into |a|. Always succeeds; values reflect in-core state only.
zx_status_t VnodeF2fs::GetAttributes(fs::VnodeAttributes *a) {
#ifdef F2FS_BU_DEBUG
  FX_LOGS(DEBUG) << "f2fs_getattr() vn=" << this << "(#" << ino_ << ")";
#endif
  *a = fs::VnodeAttributes();
  a->mode = i_mode_;
  a->inode = ino_;
  a->content_size = i_size_;
  // Storage is reported as whole blocks covering the file size.
  a->storage_size = GetBlockCount() * kBlockSize;
  a->link_count = i_nlink_;
  // timespec -> zx_time_t nanoseconds since epoch.
  a->creation_time = zx_time_add_duration(ZX_SEC(i_ctime_.tv_sec), i_ctime_.tv_nsec);
  a->modification_time = zx_time_add_duration(ZX_SEC(i_mtime_.tv_sec), i_mtime_.tv_nsec);
  return ZX_OK;
}
// Applies a creation/modification-time update to the in-core inode and, when
// anything changed, schedules the inode for writeback. Any other requested
// attribute is rejected with ZX_ERR_INVALID_ARGS.
zx_status_t VnodeF2fs::SetAttributes(fs::VnodeAttributesUpdate attr) {
  bool need_inode_sync = false;

  if (attr.has_creation_time()) {
    i_ctime_ = zx_timespec_from_duration(attr.take_creation_time());
    need_inode_sync = true;
  }
  if (attr.has_modification_time()) {
    i_mtime_ = zx_timespec_from_duration(attr.take_modification_time());
    need_inode_sync = true;
  }

  // NOTE(review): presumably take_*() above consumes those fields, so any()
  // is true only when an unsupported attribute remains — confirm against the
  // fs::VnodeAttributesUpdate contract.
  if (attr.any()) {
    return ZX_ERR_INVALID_ARGS;
  }

  if (need_inode_sync) {
    MarkInodeDirty(this);
  }
  return ZX_OK;
}
// Lookup arguments for the not-yet-ported iget helpers kept in the disabled
// block below; unused by active code.
struct f2fs_iget_args {
  uint64_t ino;   // inode number being looked up
  int on_free;    // set when the inode was found mid-teardown
};
#if 0 // porting needed
// void VnodeF2fs::F2fsSetInodeFlags() {
// uint64_t &flags = fi.i_flags;
// inode_.i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE |
// S_NOATIME | S_DIRSYNC);
// if (flags & FS_SYNC_FL)
// inode_.i_flags |= S_SYNC;
// if (flags & FS_APPEND_FL)
// inode_.i_flags |= S_APPEND;
// if (flags & FS_IMMUTABLE_FL)
// inode_.i_flags |= S_IMMUTABLE;
// if (flags & FS_NOATIME_FL)
// inode_.i_flags |= S_NOATIME;
// if (flags & FS_DIRSYNC_FL)
// inode_.i_flags |= S_DIRSYNC;
// }
// int VnodeF2fs::F2fsIgetTest(void *data) {
// f2fs_iget_args *args = (f2fs_iget_args *)data;
// if (ino_ != args->ino)
// return 0;
// if (i_state & (I_FREEING | I_WILL_FREE)) {
// args->on_free = 1;
// return 0;
// }
// return 1;
// }
// VnodeF2fs *VnodeF2fs::F2fsIgetNowait(uint64_t ino) {
// fbl::RefPtr<VnodeF2fs> vnode_refptr;
// VnodeF2fs *vnode = nullptr;
// f2fs_iget_args args = {.ino = ino, .on_free = 0};
// vnode = ilookup5(sb, ino, F2fsIgetTest, &args);
// if (vnode)
// return vnode;
// if (!args.on_free) {
// Vget(Vfs(), ino, &vnode_refptr);
// vnode = vnode_refptr.get();
// return vnode;
// }
// return static_cast<VnodeF2fs *>(ErrPtr(ZX_ERR_NOT_FOUND));
// }
#endif
// Looks up inode |ino|, returning the cached vnode when one exists; otherwise
// materializes a new vnode from disk via Create(), registers it in the vnode
// table, and validates it before handing it back.
zx_status_t VnodeF2fs::Vget(F2fs *fs, uint64_t ino, fbl::RefPtr<VnodeF2fs> *out) {
  fbl::RefPtr<VnodeF2fs> vnode_refptr;
  VnodeF2fs *vnode;

  // Fast path: already in the vnode table.
  if (fs->FindVnode(&vnode_refptr, ino) == ZX_OK) {
    *out = std::move(vnode_refptr);
    return ZX_OK;
  }

  Create(fs, ino, &vnode_refptr);
  vnode = vnode_refptr.get();
  // Create() leaves the refptr empty on failure (e.g. unreadable node page).
  if (vnode == nullptr) {
    return ZX_ERR_NO_MEMORY;
  }

  // NOTE(review): the vnode is inserted into the table before the nlink check
  // below; on the ZX_ERR_NOT_FOUND path it appears to remain registered —
  // confirm whether it should be erased on that path.
  fs->InsertVnode(vnode);

  fbl::AutoLock lock(&vnode->v_lock_);
#if 0  // porting needed
  // if (!(vnode->i_state & I_NEW))
  //   return vnode;
#endif

  // The node/meta vnodes are exempt from the orphan check; for everything
  // else, a zero link count outside of recovery means the inode is stale.
  if (!(ino == NodeIno(&fs->GetSbInfo()) || ino == MetaIno(&fs->GetSbInfo()))) {
    if (!fs->GetSbInfo().por_doing && vnode->i_nlink_ == 0) {
#if 0  // porting needed
      // iget_failed(inode);
#endif
      return ZX_ERR_NOT_FOUND;
    }
  }

#if 0  // porting needed
  // if (ino == NodeIno(sbi)) {
  //   // inode->i_mapping->a_ops = &Node_aops; //invalidatepage, releasepage
  //   // mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
  // } else if (ino == MetaIno(sbi)) {
  //   // inode->i_mapping->a_ops = &f2fs_meta_aops; //empty
  //   // mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
  // } else if (S_ISREG(inode->i_mode)) {
  //   // inode->i_op = &f2fs_file_inode_operations; //empty
  //   // inode->i_fop = &f2fs_file_operations; //empty
  //   // inode->i_mapping->a_ops = &f2fs_dblock_aops;
  // } else if (S_ISDIR(inode->i_mode)) {
  //   // inode->i_op = &f2fs_dir_inode_operations; //lookup only
  //   // inode->i_fop = &f2fs_dir_operations; //read, readdir
  //   // inode->i_mapping->a_ops = &f2fs_dblock_aops; //readpage, readpages, invalidatepage,
  //   releasepage
  //   // mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER_MOVABLE |
  //   // __GFP_ZERO);
  // } else if (S_ISLNK(inode->i_mode)) {
  //   // inode->i_op = &f2fs_symlink_inode_operations; //empty
  //   // inode->i_mapping->a_ops = &f2fs_dblock_aops;
  // } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
  //     S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
  //   // inode->i_op = &f2fs_special_inode_operations; //empty
  //   init_special_inode(inode, inode->i_mode, inode->i_rdev);
  // } else {
  //   iget_failed(inode);
  //   return ZX_ERR_IO;
  // }
#endif

  *out = std::move(vnode_refptr);
  return ZX_OK;
}
// Serializes this vnode's in-core inode state into the raw inode area of
// |node_page| (converting to little-endian on-disk byte order) and flushes the
// dirty node page. Waits for any in-flight writeback on the page first.
void VnodeF2fs::UpdateInode(Page *node_page) {
  Node *rn;
  Inode *ri;

  WaitOnPageWriteback(node_page);

  rn = static_cast<Node *>(PageAddress(node_page));
  ri = &(rn->i);

  ri->i_mode = CpuToLe(i_mode_);
  ri->i_advise = fi_.i_advise;
  ri->i_uid = CpuToLe(i_uid_);
  ri->i_gid = CpuToLe(i_gid_);
  ri->i_links = CpuToLe(i_nlink_);
  ri->i_size = CpuToLe(static_cast<uint64_t>(i_size_));
  ri->i_blocks = CpuToLe(static_cast<uint64_t>(i_blocks_));
  set_raw_extent(&fi_.ext, &ri->i_ext);
  ri->i_atime = CpuToLe(static_cast<uint64_t>(i_atime_.tv_sec));
  ri->i_ctime = CpuToLe(static_cast<uint64_t>(i_ctime_.tv_sec));
  ri->i_mtime = CpuToLe(static_cast<uint64_t>(i_mtime_.tv_sec));
  ri->i_atime_nsec = CpuToLe(static_cast<uint32_t>(i_atime_.tv_nsec));
  ri->i_ctime_nsec = CpuToLe(static_cast<uint32_t>(i_ctime_.tv_nsec));
  // Fix: the nsec fields are 32 bits on disk; casting to uint64_t here made
  // CpuToLe swap at 64-bit width (wrong value on big-endian) before the
  // narrowing store. Use uint32_t, matching i_atime_nsec/i_ctime_nsec above.
  ri->i_mtime_nsec = CpuToLe(static_cast<uint32_t>(i_mtime_.tv_nsec));
  ri->i_current_depth = CpuToLe(fi_.i_current_depth);
  ri->i_xattr_nid = CpuToLe(fi_.i_xattr_nid);
  ri->i_flags = CpuToLe(fi_.i_flags);
  ri->i_generation = CpuToLe(i_generation_);

#if 0  // porting needed
  // set_page_dirty(node_page);
#else
  FlushDirtyNodePage(Vfs(), node_page);
#endif
}
// Writes this inode's state back to its node page. The node/meta inodes have
// no raw inode and are skipped. When the node page is not already dirty, the
// page is re-fetched under sbi.write_inode to serialize with other writers.
// |wbc| is currently unused by this port.
int VnodeF2fs::WriteInode(WritebackControl *wbc) TA_NO_THREAD_SAFETY_ANALYSIS {
  SbInfo &sbi = Vfs()->GetSbInfo();
  Page *node_page = nullptr;
  zx_status_t ret = ZX_OK;

  if (ino_ == NodeIno(&sbi) || ino_ == MetaIno(&sbi))
    return ret;

  if (ret = Vfs()->Nodemgr().GetNodePage(ino_, &node_page); ret != ZX_OK)
    return ret;

  if (PageDirty(node_page)) {
    // Page already dirty: the inode contents will be flushed with it.
    F2fsPutPage(node_page, 1);
  } else {
    // Drop the page and re-acquire it under the write_inode lock before
    // updating, so concurrent WriteInode calls are serialized.
    F2fsPutPage(node_page, 1);
    fbl::AutoLock lock(&sbi.write_inode);
    if (ret = Vfs()->Nodemgr().GetNodePage(ino_, &node_page); ret != ZX_OK)
      return ret;
    UpdateInode(node_page);
    F2fsPutPage(node_page, 1);
  }
  return ZX_OK;
}
// Truncates the file's blocks down to the current i_size_, stamps mtime/ctime
// on success, and marks the inode dirty. Always rebalances the filesystem
// afterwards, even when truncation failed.
zx_status_t VnodeF2fs::DoTruncate() {
  zx_status_t ret;

  if (ret = TruncateBlocks(i_size_); ret == ZX_OK) {
    clock_gettime(CLOCK_REALTIME, &i_mtime_);
    i_ctime_ = i_mtime_;
    MarkInodeDirty(this);
  }

  Vfs()->Segmgr().BalanceFs();
  return ret;
}
// Frees up to |count| data block addresses in |dn|'s node page starting at
// dn->ofs_in_node: invalidates each allocated block, updates the extent cache,
// and decrements the valid-block count. Returns the number of blocks freed.
// dn->ofs_in_node is restored to its entry value before returning.
int VnodeF2fs::TruncateDataBlocksRange(DnodeOfData *dn, int count) {
  int nr_free = 0, ofs = dn->ofs_in_node;
  SbInfo &sbi = Vfs()->GetSbInfo();
  Node *raw_node;
  uint32_t *addr;

  raw_node = static_cast<Node *>(PageAddress(dn->node_page));
  addr = BlkaddrInNode(raw_node) + ofs;

  for (; count > 0; count--, addr++, dn->ofs_in_node++) {
    block_t blkaddr = LeToCpu(*addr);
    // kNullAddr slots are holes — nothing allocated to free.
    if (blkaddr == kNullAddr)
      continue;
    UpdateExtentCache(kNullAddr, dn);
    Vfs()->Segmgr().InvalidateBlocks(blkaddr);
    DecValidBlockCount(&sbi, dn->vnode, 1);
    nr_free++;
  }

  if (nr_free) {
#if 0  // porting needed
    // set_page_dirty(dn->node_page);
#else
    FlushDirtyNodePage(Vfs(), dn->node_page);
#endif
    Vfs()->Nodemgr().SyncInodePage(dn);
  }

  dn->ofs_in_node = ofs;
  return nr_free;
}
void VnodeF2fs::TruncateDataBlocks(DnodeOfData *dn) { TruncateDataBlocksRange(dn, kAddrsPerBlock); }
// Zeroes the tail of the data page containing byte offset |from| (from the
// in-page offset to the page end) and flushes it. A page-aligned |from| means
// there is no partial page, so nothing is done; a missing data page is also a
// no-op.
void VnodeF2fs::TruncatePartialDataPage(uint64_t from) {
  size_t offset = from & (kPageCacheSize - 1);
  Page *page = nullptr;

  if (!offset)
    return;

  if (FindDataPage(from >> kPageCacheShift, &page) != ZX_OK)
    return;

#if 0  // porting needed
  // lock_page(page);
#endif
  WaitOnPageWriteback(page);
  zero_user(page, offset, kPageCacheSize - offset);
#if 0  // porting needed
  // set_page_dirty(page);
#else
  FlushDirtyDataPage(Vfs(), page);
#endif
  F2fsPutPage(page, 1);
}
// Frees all data blocks at or beyond byte offset |from| (rounded up to a block
// boundary), then truncates the node/index blocks past that point and zeroes
// the partial tail of the last remaining page. Runs under the kDataTrunc lock.
zx_status_t VnodeF2fs::TruncateBlocks(uint64_t from) {
  SbInfo &sbi = Vfs()->GetSbInfo();
  unsigned int blocksize = sbi.blocksize;
  DnodeOfData dn;
  pgoff_t free_from;
  int count = 0;
  zx_status_t err;

  // First block index fully past |from|.
  free_from = static_cast<pgoff_t>((from + blocksize - 1) >> (sbi.log_blocksize));

  mutex_lock_op(&sbi, LockType::kDataTrunc);

  // do/while(false) gives the lookup a break-able scope: ZX_ERR_NOT_FOUND
  // means there is no dnode at free_from, which is fine — skip to the
  // node-level truncation below.
  do {
    SetNewDnode(&dn, this, nullptr, nullptr, 0);
    err = Vfs()->Nodemgr().GetDnodeOfData(&dn, free_from, kRdOnlyNode);
    if (err) {
      if (err == ZX_ERR_NOT_FOUND)
        break;
      mutex_unlock_op(&sbi, LockType::kDataTrunc);
      return err;
    }

    // Number of address slots from ofs_in_node to the end of this node block.
    if (IsInode(dn.node_page))
      count = kAddrsPerInode;
    else
      count = kAddrsPerBlock;

    count -= dn.ofs_in_node;
    ZX_ASSERT(count >= 0);

    if (dn.ofs_in_node || IsInode(dn.node_page)) {
      TruncateDataBlocksRange(&dn, count);
      free_from += count;
    }

    F2fsPutDnode(&dn);
  } while (false);

  err = Vfs()->Nodemgr().TruncateInodeBlocks(this, free_from);

  mutex_unlock_op(&sbi, LockType::kDataTrunc);

  /* lastly zero out the first data page */
  TruncatePartialDataPage(from);

  return err;
}
// Punches a hole: frees the single data block for every page index in
// [pg_start, pg_end). Pages with no dnode are skipped; any other lookup error
// aborts. Each iteration holds the kDataTrunc filesystem lock.
zx_status_t VnodeF2fs::TruncateHole(pgoff_t pg_start, pgoff_t pg_end) {
  pgoff_t index;

  for (index = pg_start; index < pg_end; index++) {
    DnodeOfData dn;
    SbInfo &sbi = Vfs()->GetSbInfo();

    fbl::AutoLock lock(&sbi.fs_lock[static_cast<int>(LockType::kDataTrunc)]);
    SetNewDnode(&dn, this, NULL, NULL, 0);
    if (zx_status_t err = Vfs()->Nodemgr().GetDnodeOfData(&dn, index, kRdOnlyNode); err != ZX_OK) {
      if (err == ZX_ERR_NOT_FOUND)
        continue;
      return err;
    }

    if (dn.data_blkaddr != kNullAddr)
      TruncateDataBlocksRange(&dn, 1);
    F2fsPutDnode(&dn);
  }
  return ZX_OK;
}
/**
* Called at the last Iput() if i_nlink is zero
*/
#if 0 // porting needed
// void VnodeF2fs::F2fsEvictInode() {
// SbInfo &sbi = Vfs()->GetSbInfo();
// // truncate_inode_pages(&inode->i_data, 0);
// if (ino_ == NodeIno(&sbi) || ino_ == MetaIno(&sbi))
// goto no_delete;
// // BUG_ON(AtomicRead(&fi.->dirty_dents));
// // remove_dirty_dir_inode(this);
// if (i_nlink || IsBadInode(this))
// goto no_delete;
// SetInodeFlag(&fi, InodeInfoFlag::kNoAlloc);
// i_size = 0;
// if (HasBlocks(this))
// F2fsTruncate();
// // remove_inode_page(inode);
// no_delete:
// ClearInode(this);
// }
#endif
// In-core link-count bookkeeping; callers must persist the change separately.
void VnodeF2fs::IncNlink() { i_nlink_++; }

void VnodeF2fs::DropNlink() { i_nlink_--; }

void VnodeF2fs::ClearNlink() { i_nlink_ = 0; }

void VnodeF2fs::SetNlink(uint32_t nlink) { i_nlink_ = nlink; }

// Number of whole blocks needed to hold i_size_ bytes (rounded up).
uint64_t VnodeF2fs::GetBlockCount() { return (i_size_ + kBlockSize - 1) / kBlockSize; }
void MarkInodeDirty(VnodeF2fs *vnode) { vnode->WriteInode(nullptr); }
// Synchronously flushes the whole file via SyncFile and reports ZX_OK to the
// callback regardless of the flush result.
void VnodeF2fs::Sync(SyncCallback closure) {
  SyncFile(0, i_size_, 0);
  closure(ZX_OK);
}
// Fills |info| with filesystem-wide statistics (block size, capacity, usage,
// node counts) taken from the superblock info, plus the "f2fs" name.
zx_status_t VnodeF2fs::QueryFilesystem(fuchsia_io::wire::FilesystemInfo *info) {
  SbInfo &sbi = Vfs()->GetSbInfo();

  *info = {};
  info->block_size = kBlockSize;
  info->max_filename_size = kMaxNameLen;
  info->fs_type = VFS_TYPE_F2FS;
  info->fs_id = Vfs()->FsId();
  info->total_bytes = sbi.user_block_count * kBlockSize;
  info->used_bytes = ValidUserBlocks(&sbi) * kBlockSize;
  info->total_nodes = sbi.total_node_count;
  info->used_nodes = sbi.total_valid_inode_count;

  constexpr std::string_view kFsName = "f2fs";
  static_assert(kFsName.size() + 1 < fuchsia_io::wire::kMaxFsNameBuffer, "f2fs name too long");
  // copy() returns the number of characters written; terminate right after.
  info->name[kFsName.copy(reinterpret_cast<char *>(info->name.data()),
                          fuchsia_io::wire::kMaxFsNameBuffer - 1)] = '\0';

  // TODO(unknown): Fill info->free_shared_pool_bytes using fvm info
  return ZX_OK;
}
// fsync entry point. Decides between a full checkpoint (needed when roll-
// forward recovery cannot reconstruct this inode) and the cheaper path of
// flushing just this inode's node page with fsync/dentry marks set.
// |start|, |end|, and |datasync| are accepted for interface parity but the
// range is not used by this port.
zx_status_t VnodeF2fs::SyncFile(loff_t start, loff_t end, int datasync) {
  SbInfo &sbi = Vfs()->GetSbInfo();
  uint64_t cur_version;
  zx_status_t ret = 0;
  bool need_cp = false;

#if 0  // porting needed
  // WritebackControl wbc;
  // wbc.sync_mode = WB_SYNC_ALL;
  // wbc.nr_to_write = LONG_MAX;
  // wbc.for_reclaim = 0;
  // ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
  // if (ret)
  //   return ret;
#endif

  fbl::AutoLock lock(&i_mutex_);

#if 0  // porting needed
  // if (inode->i_sb->s_flags & MS_RDONLY)
  //   goto out;
  // if (datasync && !(i_state & I_DIRTY_DATASYNC)) {
  //   goto out;
  //}
#endif

  // Snapshot the current checkpoint version under cp_mutex.
  do {
    fbl::AutoLock cplock(&sbi.cp_mutex);
    cur_version = LeToCpu(GetCheckpoint(&sbi)->checkpoint_ver);
  } while (false);

#if 0  // porting needed
  // if (fi.data_version != cur_version &&
  //     !(i_state & I_DIRTY)) {
  //   goto out;
  //}
#endif

  fi_.data_version--;

  // A checkpoint is required whenever roll-forward recovery could not replay
  // this inode: non-regular files or multi-link files, an explicit kNeedCp
  // flag, insufficient roll-forward space, disabled roll-forward, or a parent
  // directory that is not checkpointed.
  if (!S_ISREG(i_mode_) || i_nlink_ != 1)
    need_cp = true;
  if (IsInodeFlagSet(&fi_, InodeInfoFlag::kNeedCp))
    need_cp = true;
  if (!Vfs()->SpaceForRollForward())
    need_cp = true;
  if (TestOpt(&sbi, kMountDisableRollForward) || NeedToSyncDir())
    need_cp = true;

  // TODO: it intended to update cached page for this, but
  // the current impl. writes out inode in a sync manner
  // WriteInode(nullptr);

  if (need_cp) {
    // TODO: remove it after implementing cache
    WriteInode(nullptr);
    // all the dirty node pages should be flushed for POR
    ret = Vfs()->SyncFs(1);
    ClearInodeFlag(&fi_, InodeInfoFlag::kNeedCp);
  } else {
    // TODO: it intended to flush all dirty node pages on cache
    // remove it after cache
    Page *node_page = nullptr;
    // Mark the dentry bit when this node is not covered by the checkpoint,
    // so recovery knows to recover its directory entry as well.
    int mark = !Vfs()->Nodemgr().IsCheckpointedNode(Ino());

    if (ret = Vfs()->Nodemgr().GetNodePage(Ino(), &node_page); ret != ZX_OK) {
      return ret;
    }

    Vfs()->Nodemgr().SetFsyncMark(node_page, 1);
    Vfs()->Nodemgr().SetDentryMark(node_page, mark);

    UpdateInode(node_page);
    F2fsPutPage(node_page, 1);
#if 0  // porting needed
    // while (Vfs()->Nodemgr().SyncNodePages(Ino(), &wbc) == 0)
    //   WriteInode(nullptr);
    // filemap_fdatawait_range(nullptr,//sbi->node_inode->i_mapping,
    //     0, LONG_MAX);
#endif
  }
#if 0  // porting needed
  // out:
#endif
  return ret;
}
// Returns nonzero when the parent directory's node is not covered by the last
// checkpoint, meaning an fsync of this file must also checkpoint/sync the
// directory. Asserts the cached parent ino is valid.
int VnodeF2fs::NeedToSyncDir() {
#if 0  // porting needed
  // dentry = d_find_any_alias(vnode);
  // if (!dentry) {
  //   // Iput(inode);
  //   return 0;
  // }
  // pino = dentry->d_parent->d_inode->i_ino;
  // dput(dentry);
  // Iput();
#endif
  ZX_ASSERT(i_pino_ < kNullIno);
  return !Vfs()->Nodemgr().IsCheckpointedNode(i_pino_);
}
void VnodeF2fs::Notify(std::string_view name, unsigned event) { watcher_.Notify(name, event); }
// Registers |watcher| to receive directory events on this vnode, filtered by
// |mask|/|options|; delegates to the watcher container.
zx_status_t VnodeF2fs::WatchDir(fs::Vfs *vfs, uint32_t mask, uint32_t options,
                                zx::channel watcher) {
  return watcher_.WatchDir(vfs, this, mask, options, std::move(watcher));
}
} // namespace f2fs