[f2fs] Correct fs_lock usage and use C++ locking primitives

This change uses shared locks for fs_lock instead of exclusive ones.
fs_lock is used to block file operations during checkpoint, so threads
no longer need to compete for exclusive locks on the normal I/O path.
It also adds a new file-wide lock, VnodeF2fs::io_lock_, which protects
on-disk file contents from simultaneous accesses; once the pager is
available, page-wide locking can replace it. Vnode::mutex_ protects
vnode members.
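
A minimal sketch of the resulting pattern (illustrative only; the lock
names match this change, but the call sites are simplified from the
data.cc and checkpoint.cc hunks below):

  // Normal I/O path: hold fs_lock shared so file operations do not
  // serialize on it, and hold the per-file io_lock_ exclusively to
  // guard this vnode's on-disk contents.
  fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kDataNew)]);
  std::lock_guard write_lock(io_lock_);

  // Checkpoint path: BlockOperations() still takes each fs_lock
  // exclusively via mutex_lock_op(), draining in-flight operations.
  mutex_lock_op(&sbi, LockType::kDataNew);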

BUG: 78140

Test: fx test f2fs-unittest f2fs-fs-tests f2fs-slow-fs-tests

Change-Id: I759fe0af08ec8b17970ca21fa7bc7b19417f4b53
Reviewed-on: https://fuchsia-review.googlesource.com/c/third_party/f2fs/+/567605
Reviewed-by: Brett Wilson <brettw@google.com>
diff --git a/admin.cc b/admin.cc
index 93591e2..190b639 100644
--- a/admin.cc
+++ b/admin.cc
@@ -14,8 +14,7 @@
 
 void AdminService::Shutdown(ShutdownRequestView request, ShutdownCompleter::Sync& completer) {
   f2fs_->PutSuper();
-  f2fs_->bc_.reset();
-
+  f2fs_->ResetBc();
   completer.Reply();
 }
 
diff --git a/checkpoint.cc b/checkpoint.cc
index eac0e48..3290833 100644
--- a/checkpoint.cc
+++ b/checkpoint.cc
@@ -160,10 +160,9 @@
    * for cp pack we can have max 1020*507 orphan entries
    */
   max_orphans = (sbi.blocks_per_seg - 5) * kOrphansPerBlock;
-  mtx_lock(&sbi.orphan_inode_mutex);
+  std::lock_guard lock(sbi.orphan_inode_mutex);
   if (sbi.n_orphans >= max_orphans)
     err = ZX_ERR_NO_SPACE;
-  mtx_unlock(&sbi.orphan_inode_mutex);
   return err;
 }
 
@@ -178,42 +177,34 @@
 }
 void F2fs::AddOrphanInode(nid_t ino) {
   SbInfo &sbi = GetSbInfo();
-  list_node_t *head, *this_node;
   OrphanInodeEntry *new_entry = nullptr, *orphan = nullptr;
 
-  mtx_lock(&sbi.orphan_inode_mutex);
-  head = &sbi.orphan_inode_list;
+  std::lock_guard lock(sbi.orphan_inode_mutex);
+  list_node_t *head = &sbi.orphan_inode_list, *this_node;
   list_for_every(head, this_node) {
     orphan = containerof(this_node, OrphanInodeEntry, list);
     if (orphan->ino == ino)
-      goto out;
+      return;
     if (orphan->ino > ino)
       break;
     orphan = nullptr;
   }
-retry:
-#if 0  // porting needed
-  // new_entry = kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
-#else
+
+  // TODO: operator new never fails here, so the retry loop from the Linux code is unnecessary
   new_entry = new OrphanInodeEntry;
-#endif
-  if (!new_entry) {
-#if 0  // porting needed
-    // cond_resched();
-#endif
-    goto retry;
-  }
+  ZX_ASSERT(new_entry != nullptr);
+
   new_entry->ino = ino;
   list_initialize(&new_entry->list);
 
-  /* add new_oentry into list which is sorted by inode number */
+  // Add new_entry into the list, which is sorted by inode number
   if (orphan) {
     OrphanInodeEntry *prev;
 
-    /* get previous entry */
+    // get previous entry
     prev = containerof(orphan->list.prev, OrphanInodeEntry, list);
     if (&prev->list != head) {
-      /* insert new orphan inode entry */
+      // insert new orphan inode entry
       list_add(&prev->list, &new_entry->list);
     } else {
       list_add(head, &new_entry->list);
@@ -222,8 +213,6 @@
     list_add_tail(head, &new_entry->list);
   }
   sbi.n_orphans++;
-out:
-  mtx_unlock(&sbi.orphan_inode_mutex);
 }
 
 void F2fs::RemoveOrphanInode(nid_t ino) {
@@ -231,7 +220,7 @@
   list_node_t *this_node, *next, *head;
   OrphanInodeEntry *orphan;
 
-  mtx_lock(&sbi.orphan_inode_mutex);
+  std::lock_guard lock(sbi.orphan_inode_mutex);
   head = &sbi.orphan_inode_list;
   list_for_every_safe(head, this_node, next) {
     orphan = containerof(this_node, OrphanInodeEntry, list);
@@ -245,7 +234,6 @@
       break;
     }
   }
-  mtx_unlock(&sbi.orphan_inode_mutex);
 }
 
 void F2fs::RecoverOrphanInode(nid_t ino) {
@@ -300,7 +288,7 @@
   orphan_blocks =
       static_cast<uint16_t>((sbi.n_orphans + (kOrphansPerBlock - 1)) / kOrphansPerBlock);
 
-  mtx_lock(&sbi.orphan_inode_mutex);
+  std::lock_guard lock(sbi.orphan_inode_mutex);
   head = &sbi.orphan_inode_list;
 
   /* loop for each orphan inode entry and write them in Jornal block */
@@ -329,29 +317,24 @@
       nentries = 0;
       page = nullptr;
     }
-    if (page)
-      goto page_exist;
-
-    page = GrabMetaPage(start_blk);
-    orphan_blk = static_cast<OrphanBlock *>(PageAddress(page));
-    memset(orphan_blk, 0, sizeof(*orphan_blk));
-  page_exist:
+    if (!page) {
+      page = GrabMetaPage(start_blk);
+      orphan_blk = static_cast<OrphanBlock *>(PageAddress(page));
+      memset(orphan_blk, 0, sizeof(*orphan_blk));
+    }
     orphan_blk->ino[nentries++] = CpuToLe(orphan->ino);
   }
-  if (!page)
-    goto end;
-
-  orphan_blk->blk_addr = CpuToLe(index);
-  orphan_blk->blk_count = CpuToLe(orphan_blocks);
-  orphan_blk->entry_count = CpuToLe(nentries);
+  if (page) {
+    orphan_blk->blk_addr = CpuToLe(index);
+    orphan_blk->blk_count = CpuToLe(orphan_blocks);
+    orphan_blk->entry_count = CpuToLe(nentries);
 #if 0  // porting needed
   // set_page_dirty(page, this);
 #else
-  FlushDirtyMetaPage(this, page);
+    FlushDirtyMetaPage(this, page);
 #endif
-  F2fsPutPage(page, 1);
-end:
-  mtx_unlock(&sbi.orphan_inode_mutex);
+    F2fsPutPage(page, 1);
+  }
 }
 
 Page *F2fs::ValidateCheckpoint(block_t cp_addr, uint64_t *version) {
@@ -616,7 +599,7 @@
   for (t = static_cast<int>(LockType::kDataNew); t <= static_cast<int>(LockType::kNodeTrunc); t++)
     mutex_lock_op(&sbi, static_cast<LockType>(t));
 
-  mtx_lock(&sbi.write_inode);
+  fbl::AutoLock write_inode(&sbi.write_inode);
 
   /*
    * POR: we should ensure that there is no dirty node pages
@@ -631,7 +614,6 @@
     mutex_unlock_op(&sbi, LockType::kNodeWrite);
     goto retry;
   }
-  mtx_unlock(&sbi.write_inode);
 }
 
 void F2fs::UnblockOperations() TA_NO_THREAD_SAFETY_ANALYSIS {
@@ -789,19 +771,14 @@
   ResetSbDirt(&sbi);
 }
 
-/**
- * We guarantee that this checkpoint procedure should not fail.
- */
+// We guarantee that this checkpoint procedure should not fail.
 void F2fs::WriteCheckpoint(bool blocked, bool is_umount) {
   SbInfo &sbi = GetSbInfo();
   Checkpoint *ckpt = GetCheckpoint(&sbi);
   uint64_t ckpt_ver;
 
-  // TODO(unknown): Need to confirm if blocked is true
-  // if (!blocked) {
-  mtx_lock(&sbi.cp_mutex);
+  fbl::AutoLock cp_lock(&sbi.cp_mutex);
   BlockOperations();
-  //}
 
 #if 0  // porting needed (bio[type] is empty)
   // Segmgr().SubmitBio(PageType::kData, true);
@@ -809,30 +786,26 @@
   // Segmgr().SubmitBio(PageType::kMeta, true);
 #endif
 
-  /*
-   * update checkpoint pack index
-   * Increase the version number so that
-   * SIT entries and seg summaries are written at correct place
-   */
+  // Update the checkpoint pack index.
+  // Increase the version number so that SIT entries and seg summaries
+  // are written to the correct place.
   ckpt_ver = LeToCpu(ckpt->checkpoint_ver);
   ckpt->checkpoint_ver = CpuToLe(static_cast<uint64_t>(++ckpt_ver));
 
-  /* write cached NAT/SIT entries to NAT/SIT area */
+  // write cached NAT/SIT entries to NAT/SIT area
   Nodemgr().FlushNatEntries();
   Segmgr().FlushSitEntries();
 
   Segmgr().ResetVictimSegmap();
 
-  /* unlock all the fs_lock[] in do_checkpoint() */
+  // unlock all the fs_lock[] in DoCheckpoint()
   DoCheckpoint(is_umount);
 
   UnblockOperations();
-  mtx_unlock(&sbi.cp_mutex);
 }
 
 void F2fs::InitOrphanInfo() {
   SbInfo &sbi = GetSbInfo();
-  mtx_init(&sbi.orphan_inode_mutex, mtx_plain);
   list_initialize(&sbi.orphan_inode_list);
   sbi.n_orphans = 0;
 }
diff --git a/data.cc b/data.cc
index b9b50c3..a57b10c 100644
--- a/data.cc
+++ b/data.cc
@@ -100,18 +100,18 @@
   /* Update the page address in the parent node */
   SetDataBlkaddr(dn, blk_addr);
 
-  WriteLock(&fi->ext.ext_lock);
-
-  start_fofs = fi->ext.fofs;
-  end_fofs = fi->ext.fofs + fi->ext.len - 1;
-  start_blkaddr = fi->ext.blk_addr;
-  end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;
-
-  /* Drop and initialize the matched extent */
-  if (fi->ext.len == 1 && fofs == start_fofs)
-    fi->ext.len = 0;
-
   do {
+    std::lock_guard ext_lock(fi->ext.ext_lock);
+
+    start_fofs = fi->ext.fofs;
+    end_fofs = fi->ext.fofs + fi->ext.len - 1;
+    start_blkaddr = fi->ext.blk_addr;
+    end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;
+
+    /* Drop and initialize the matched extent */
+    if (fi->ext.len == 1 && fofs == start_fofs)
+      fi->ext.len = 0;
+
     /* Initial extent */
     if (fi->ext.len == 0) {
       if (blk_addr != kNullAddr) {
@@ -147,11 +147,9 @@
       }
       break;
     }
-    WriteUnlock(&fi->ext.ext_lock);
     return;
   } while (false);
 
-  WriteUnlock(&fi->ext.ext_lock);
   Vfs()->Nodemgr().SyncInodePage(dn);
 }
 
@@ -360,7 +358,7 @@
   //   }
   // return 0;
 #else
-  return fs->bc_->Readblk(blk_addr, page->data);
+  return fs->GetBc().Readblk(blk_addr, page->data);
 #endif
 }
 
@@ -518,7 +516,7 @@
 #endif
 
   do {
-    fbl::AutoLock lock(&sbi.fs_lock[static_cast<int>(LockType::kDataWrtie)]);
+    fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kDataWrtie)]);
     if (IsDir()) {
       DecPageCount(&sbi, CountType::kDirtyDents);
 #if 0  // porting needed
@@ -531,7 +529,7 @@
       // wbc->pages_skipped++;
       // set_page_dirty(page);
 #endif
-      FlushDirtyDataPage(Vfs(), page);
+      ZX_ASSERT(0);
     }
   } while (false);
 
@@ -604,7 +602,8 @@
 #endif
 
   do {
-    fbl::AutoLock lock(&sbi.fs_lock[static_cast<int>(LockType::kDataNew)]);
+    fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kDataNew)]);
+    std::lock_guard write_lock(io_lock_);
 
     SetNewDnode(&dn, this, NULL, NULL, 0);
     if (zx_status_t err = Vfs()->Nodemgr().GetDnodeOfData(&dn, index, 0); err != ZX_OK) {
diff --git a/dir.cc b/dir.cc
index 15ee24b..627a9a0 100644
--- a/dir.cc
+++ b/dir.cc
@@ -143,8 +143,10 @@
   unsigned int max_depth;
   unsigned int level;
 
-  if (TestFlag(InodeInfoFlag::kInlineDentry))
+  fs::SharedLock read_lock(io_lock_);
+  if (TestFlag(InodeInfoFlag::kInlineDentry)) {
     return FindInInlineDir(name, res_page);
+  }
 
   if (npages == 0)
     return nullptr;
@@ -207,7 +209,8 @@
 void Dir::SetLink(DirEntry *de, Page *page, VnodeF2fs *vnode) {
   SbInfo &sbi = Vfs()->GetSbInfo();
 
-  fbl::AutoLock lock(&sbi.fs_lock[static_cast<int>(LockType::kDentryOps)]);
+  fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kDentryOps)]);
+  std::lock_guard write_lock(io_lock_);
 #if 0  // porting needed
   // lock_page(page);
 #endif
@@ -363,6 +366,8 @@
 
   if (TestFlag(InodeInfoFlag::kInlineDentry)) {
     bool is_converted = false;
+    fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kDentryOps)]);
+    std::lock_guard write_lock(io_lock_);
     if (err = AddInlineEntry(name, vnode, &is_converted); err != ZX_OK)
       return err;
 
@@ -392,7 +397,8 @@
     bidx = DirBlockIndex(level, (dentry_hash % nbucket));
 
     for (block = bidx; block <= (bidx + nblock - 1); block++) {
-      fbl::AutoLock lock(&sbi.fs_lock[static_cast<int>(LockType::kDentryOps)]);
+      fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kDentryOps)]);
+      std::lock_guard write_lock(io_lock_);
       if (err = GetNewDataPage(block, true, &dentry_page); err != ZX_OK) {
         return err;
       }
@@ -465,13 +471,14 @@
   void *kaddr = PageAddress(page);
   int i;
 
+  fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kDentryOps)]);
+  std::lock_guard write_lock(io_lock_);
+
   if (TestFlag(InodeInfoFlag::kInlineDentry)) {
     DeleteInlineEntry(dentry, page, vnode);
     return;
   }
 
-  fbl::AutoLock lock(&sbi.fs_lock[static_cast<int>(LockType::kDentryOps)]);
-
 #if 0  // porting needed
   // lock_page(page);
 #endif
@@ -637,6 +644,8 @@
   zx_status_t ret = ZX_OK;
   bool done = false;
 
+  fs::SharedLock read_lock(io_lock_);
+
   if (TestFlag(InodeInfoFlag::kInlineDentry))
     return ReadInlineDir(cookie, dirents, len, out_actual);
 
diff --git a/dir.h b/dir.h
index ec210c0..1b6059e 100644
--- a/dir.h
+++ b/dir.h
@@ -43,7 +43,8 @@
   void fbl_recycle() { RecycleNode(); }
 
   zx_status_t Lookup(std::string_view name, fbl::RefPtr<fs::Vnode> *out) final;
-  zx_status_t Readdir(fs::VdirCookie *cookie, void *dirents, size_t len, size_t *out_actual) final;
+  zx_status_t Readdir(fs::VdirCookie *cookie, void *dirents, size_t len, size_t *out_actual) final
+      __TA_EXCLUDES(io_lock_);
   zx_status_t Create(std::string_view name, uint32_t mode, fbl::RefPtr<fs::Vnode> *out) final;
   zx_status_t Link(std::string_view name, fbl::RefPtr<fs::Vnode> _target) final;
   zx_status_t Unlink(std::string_view name, bool must_be_dir) final;
@@ -60,10 +61,10 @@
                         f2fs_hash_t namehash, Page **res_page);
   DirEntry *FindInLevel(unsigned int level, std::string_view name, int namelen,
                         f2fs_hash_t namehash, Page **res_page);
-  DirEntry *FindEntry(std::string_view name, Page **res_page);
+  DirEntry *FindEntry(std::string_view name, Page **res_page) __TA_EXCLUDES(io_lock_);
   DirEntry *ParentDir(Page **p);
   ino_t InodeByName(std::string_view name);
-  void SetLink(DirEntry *de, Page *page, VnodeF2fs *inode);
+  void SetLink(DirEntry *de, Page *page, VnodeF2fs *inode) __TA_EXCLUDES(io_lock_);
   void InitDentInode(VnodeF2fs *vnode, Page *ipage);
 #if 0  // porting needed
   // zx_status_t InitInodeMetadata(VnodeF2fs *vnode, dentry *dentry);
@@ -72,8 +73,8 @@
 #endif
   void UpdateParentMetadata(VnodeF2fs *inode, unsigned int current_depth);
   int RoomForFilename(DentryBlock *dentry_blk, int slots);
-  zx_status_t AddLink(std::string_view name, VnodeF2fs *vnode);
-  void DeleteEntry(DirEntry *dentry, Page *page, VnodeF2fs *vnode);
+  zx_status_t AddLink(std::string_view name, VnodeF2fs *vnode) __TA_EXCLUDES(io_lock_);
+  void DeleteEntry(DirEntry *dentry, Page *page, VnodeF2fs *vnode) __TA_EXCLUDES(io_lock_);
   zx_status_t MakeEmpty(VnodeF2fs *vnode, VnodeF2fs *parent);
   bool IsEmptyDir();
 
diff --git a/f2fs.h b/f2fs.h
index c3361fa..056b677 100644
--- a/f2fs.h
+++ b/f2fs.h
@@ -97,72 +97,67 @@
   [[nodiscard]] static zx_status_t Create(std::unique_ptr<f2fs::Bcache> bc,
                                           const MountOptions &options, std::unique_ptr<F2fs> *out);
 
+  void SetUnmountCallback(fbl::Closure closure) { on_unmount_ = std::move(closure); }
+  void Shutdown(fs::Vfs::ShutdownCallback cb) final;
+
+  void SetQueryService(fbl::RefPtr<QueryService> svc) { query_svc_ = std::move(svc); }
+  void SetAdminService(fbl::RefPtr<AdminService> svc) { admin_svc_ = std::move(svc); }
+
+  void CheckNidRange(const nid_t &nid);
+  zx_status_t GetFsId(zx::event *out_fs_id) const;
+  uint64_t GetFsIdLegacy() const { return fs_id_legacy_; }
+
+  VnodeCache &GetVCache() { return vnode_cache_; }
   inline zx_status_t InsertVnode(VnodeF2fs *vn) { return vnode_cache_.Add(vn); }
-
   inline void EvictVnode(VnodeF2fs *vn) { __UNUSED zx_status_t status = vnode_cache_.Evict(vn); }
-
   inline zx_status_t LookupVnode(ino_t ino, fbl::RefPtr<VnodeF2fs> *out) {
     return vnode_cache_.Lookup(ino, out);
   }
 
-  void GetNodeInfo(nid_t nid, NodeInfo *ni);
-
-  void SetUnmountCallback(fbl::Closure closure) { on_unmount_ = std::move(closure); }
-  void Shutdown(fs::Vfs::ShutdownCallback cb) final;
-
-  // TODO(unknown): non-public member variables
-  std::unique_ptr<f2fs::Bcache> bc_;
-
+  void ResetBc(std::unique_ptr<f2fs::Bcache> *out = nullptr) {
+    if (out == nullptr) {
+      bc_.reset();
+      return;
+    }
+    *out = std::move(bc_);
+  }
+  Bcache &GetBc() { return *bc_; }
   SuperBlock &RawSb() { return *raw_sb_; }
   SbInfo &GetSbInfo() { return *sbi_; }
   SegMgr &Segmgr() { return *seg_mgr_; }
   NodeMgr &Nodemgr() { return *node_mgr_; }
 
-  zx_status_t GetFsId(zx::event *out_fs_id) const;
-  uint64_t GetFsIdLegacy() const { return fs_id_legacy_; }
-
-#if 0  // porting needed
-  // void InitOnce(void *foo);
-  // VnodeF2fs *F2fsAllocInode();
-  // static void F2fsICallback(rcu_head *head);
-  // void F2fsDestroyInode(inode *inode);
-#endif
-
+  // super.cc
   void PutSuper();
   zx_status_t SyncFs(int sync);
-#if 0  // porting needed
-  // int F2fsStatfs(dentry *dentry /*, kstatfs *buf*/);
-  // int F2fsShowOptions(/*seq_file *seq*/);
-  // VnodeF2fs *F2fsNfsGetInode(uint64_t ino, uint32_t generation);
-  // dentry *F2fsFhToDentry(fid *fid, int fh_len, int fh_type);
-  // dentry *F2fsFhToParent(fid *fid, int fh_len, int fh_type);
-#endif
-  loff_t MaxFileSize(unsigned bits);
   int SanityCheckRawSuper();
   int SanityCheckCkpt();
   void InitSbInfo();
   zx_status_t FillSuper();
   void ParseOptions();
 #if 0  // porting needed
-  // dentry *F2fsMount(file_system_type *fs_type, int flags,
-  //     const char *dev_name, void *data);
-  // int InitInodecache(void);
-  // void DestroyInodecache(void);
-  // int /*__init*/ initF2fsFs(void);
-  // void /*__exit*/ exitF2fsFs(void);
+  void InitOnce(void *foo);
+  VnodeF2fs *F2fsAllocInode();
+  static void F2fsICallback(rcu_head *head);
+  void F2fsDestroyInode(inode *inode);
+  int F2fsStatfs(dentry *dentry /*, kstatfs *buf*/);
+  int F2fsShowOptions(/*seq_file *seq*/);
+  VnodeF2fs *F2fsNfsGetInode(uint64_t ino, uint32_t generation);
+  dentry *F2fsFhToDentry(fid *fid, int fh_len, int fh_type);
+  dentry *F2fsFhToParent(fid *fid, int fh_len, int fh_type);
+  dentry *F2fsMount(file_system_type *fs_type, int flags,
+       const char *dev_name, void *data);
+  int InitInodecache(void);
+  void DestroyInodecache(void);
+  int /*__init*/ initF2fsFs(void);
+  void /*__exit*/ exitF2fsFs(void);
 #endif
 
   // checkpoint.cc
   Page *GetMetaPage(pgoff_t index);
   Page *GrabMetaPage(pgoff_t index);
   zx_status_t F2fsWriteMetaPage(Page *page, WritebackControl *wbc);
-#if 0  // porting needed
-  // int F2fsWriteMetaPages(address_space *mapping, WritebackControl *wbc);
-#endif
   int64_t SyncMetaPages(PageType type, long nr_to_write);
-#if 0  // porting needed
-  // int F2fsSetMetaPageDirty(Page *page);
-#endif
   zx_status_t CheckOrphanSpace();
   void AddOrphanInode(VnodeF2fs *vnode);
   void AddOrphanInode(nid_t ino);
@@ -172,10 +167,6 @@
   void WriteOrphanInodes(block_t start_blk);
   zx_status_t GetValidCheckpoint();
   Page *ValidateCheckpoint(block_t cp_addr, uint64_t *version);
-#if 0  // porting needed
-  // void SetDirtyDirPage(VnodeF2fs *vnode, Page *page);
-  // void RemoveDirtyDirInode(VnodeF2fs *vnode);
-#endif
   void SyncDirtyDirInodes();
   void BlockOperations();
   void UnblockOperations();
@@ -183,6 +174,10 @@
   void WriteCheckpoint(bool blocked, bool is_umount);
   void InitOrphanInfo();
 #if 0  // porting needed
+  int F2fsWriteMetaPages(address_space *mapping, WritebackControl *wbc);
+  int F2fsSetMetaPageDirty(Page *page);
+  void SetDirtyDirPage(VnodeF2fs *vnode, Page *page);
+  void RemoveDirtyDirInode(VnodeF2fs *vnode);
   int CreateCheckpointCaches();
   void DestroyCheckpointCaches();
 #endif
@@ -202,20 +197,16 @@
   // block count
   void DecValidBlockCount(VnodeF2fs *vnode, block_t count);
   zx_status_t IncValidBlockCount(VnodeF2fs *vnode, block_t count);
-
   block_t ValidUserBlocks();
   uint64_t ValidNodeCount();
   void IncValidInodeCount();
   void DecValidInodeCount();
   uint64_t ValidInodeCount();
-
-  void CheckNidRange(const nid_t &nid);
-
-  VnodeCache &GetVCache() { return vnode_cache_; }
-  void SetQueryService(fbl::RefPtr<QueryService> svc) { query_svc_ = std::move(svc); }
-  void SetAdminService(fbl::RefPtr<AdminService> svc) { admin_svc_ = std::move(svc); }
+  loff_t MaxFileSize(unsigned bits);
 
  private:
+  std::unique_ptr<f2fs::Bcache> bc_;
+
   fbl::RefPtr<VnodeF2fs> root_vnode_;
   fbl::Closure on_unmount_{};
   MountOptions mount_options_;
diff --git a/f2fs_internal.h b/f2fs_internal.h
index 54a7160..4aaeb7a 100644
--- a/f2fs_internal.h
+++ b/f2fs_internal.h
@@ -74,10 +74,10 @@
 
 // for in-memory extent cache entry
 struct ExtentInfo {
-  rwlock_t ext_lock;      // rwlock for consistency
-  uint64_t fofs = 0;      // start offset in a file
-  uint32_t blk_addr = 0;  // start block address of the extent
-  uint64_t len = 0;       // lenth of the extent
+  fs::SharedMutex ext_lock;  // rwlock for consistency
+  uint64_t fofs = 0;         // start offset in a file
+  uint32_t blk_addr = 0;     // start block address of the extent
+  uint64_t len = 0;          // length of the extent
 };
 
 // i_advise uses Fadvise:xxx bit. We can add additional hints later.
@@ -108,16 +108,16 @@
 
   // NAT cache management
   RadixTreeRoot nat_root;         // root of the nat entry cache
-  rwlock_t nat_tree_lock;         // protect nat_tree_lock
+  fs::SharedMutex nat_tree_lock;  // protect the nat entry cache
   uint32_t nat_cnt = 0;           // the # of cached nat entries
   list_node_t nat_entries;        // cached nat entry list (clean)
   list_node_t dirty_nat_entries;  // cached nat entry list (dirty)
 
   // free node ids management
-  list_node_t free_nid_list;      // a list for free nids
-  spinlock_t free_nid_list_lock;  // protect free nid list
-  uint64_t fcnt = 0;              // the number of free node id
-  mtx_t build_lock;               // lock for build free nids
+  list_node_t free_nid_list;           // a list for free nids
+  fs::SharedMutex free_nid_list_lock;  // protect free nid list
+  uint64_t fcnt = 0;                   // the number of free node id
+  fbl::Mutex build_lock;               // lock for build free nids
 
   // for checkpoint
   char *nat_bitmap = nullptr;       // NAT bitmap pointer
@@ -179,9 +179,6 @@
   struct DirtySeglistInfo *dirty_info = nullptr;  // dirty segment information
   struct CursegInfo *curseg_array = nullptr;      // active segment information
 
-  list_node_t wblist_head;  // list of under-writeback pages
-  spinlock_t wblist_lock;   // lock for checkpoint
-
   block_t seg0_blkaddr = 0;  // block address of 0'th segment
   block_t main_blkaddr = 0;  // start block address of main area
   block_t ssa_blkaddr = 0;   // start block address of SSA area
@@ -268,20 +265,20 @@
   Checkpoint *ckpt = nullptr;  // raw checkpoint pointer
   // inode *meta_inode;		// cache meta blocks
   fbl::RefPtr<VnodeF2fs> meta_vnode;
-  mtx_t cp_mutex;                                          // for checkpoint procedure
-  mtx_t fs_lock[static_cast<int>(LockType::kNrLockType)];  // for blocking FS operations
-  mtx_t write_inode;                                       // mutex for write inode
-  mtx_t writepages;                                        // mutex for writepages()
-  int por_doing = 0;                                       // recovery is doing or not
+  fbl::Mutex cp_mutex;                                               // for checkpoint procedure
+  fs::SharedMutex fs_lock[static_cast<int>(LockType::kNrLockType)];  // for blocking FS operations
+  fbl::Mutex write_inode;                                            // mutex for write inode
+  fbl::Mutex writepages;                                             // mutex for writepages()
+  int por_doing = 0;                                                 // recovery is doing or not
 
   // for orphan inode management
-  list_node_t orphan_inode_list;  // orphan inode list
-  mtx_t orphan_inode_mutex;       // for orphan inode list
-  uint64_t n_orphans = 0;         // # of orphan inodes
+  list_node_t orphan_inode_list;       // orphan inode list
+  fs::SharedMutex orphan_inode_mutex;  // for orphan inode list
+  uint64_t n_orphans = 0;              // # of orphan inodes
 
   // for directory inode management
   list_node_t dir_inode_list;  // dir inode list
-  spinlock_t dir_inode_lock;   // for dir inode list lock
+  fbl::Mutex dir_inode_lock;   // for dir inode list lock
   uint64_t n_dirty_dirs = 0;   // # of dir inodes
 
   // basic file system units
@@ -311,7 +308,7 @@
   uint64_t mount_opt = 0;  // set with kMountOptxxxx bits according to F2fs::mount_options_
 
   // for cleaning operations
-  mtx_t gc_mutex;                              // mutex for GC
+  fs::SharedMutex gc_mutex;                    // mutex for GC
   struct F2fsGc_kthread *gc_thread = nullptr;  // GC thread
 
   // for stat information.
@@ -322,23 +319,9 @@
   uint64_t last_victim[2];                     // last victim segment #
   int total_hit_ext = 0, read_hit_ext = 0;     // extent cache hit ratio
   int bg_gc = 0;                               // background gc calls
-  spinlock_t stat_lock;                        // lock for stat operations
+  fbl::Mutex stat_lock;                        // lock for stat operations
 };
 
-// Inline functions
-
-// static inline InodeInfo *F2FS_I(VnodeF2fs *vnode)
-// {
-//   // TODO: IMPL
-// 	//return container_of(inode, InodeInfo, vfs_inode);
-//   return &vnode->fi;
-// }
-
-// static inline SbInfo *F2FS_SB(super_block *sb)
-//{
-//  return (SbInfo *)sb->s_fs_info;
-//}
-
 static inline const SuperBlock *RawSuper(SbInfo *sbi) {
   return static_cast<const SuperBlock *>(sbi->raw_super);
 }
@@ -369,29 +352,18 @@
 
 static inline void mutex_lock_op(SbInfo *sbi, LockType t)
     TA_ACQ(&sbi->fs_lock[static_cast<int>(t)]) {
-  // TODO: IMPL
-  // mutex_lock_nested(&sbi->fs_lock[static_cast<int>(t)], t);
-  mtx_lock(&sbi->fs_lock[static_cast<int>(t)]);
+  // TODO: Too many locks. It seems that two locks (node/io) are enough
+  // to synchronize the checkpoint and I/O paths.
+  sbi->fs_lock[static_cast<int>(t)].lock();
 }
 
 static inline void mutex_unlock_op(SbInfo *sbi, LockType t)
     TA_REL(&sbi->fs_lock[static_cast<int>(t)]) {
-  mtx_unlock(&sbi->fs_lock[static_cast<int>(t)]);
+  sbi->fs_lock[static_cast<int>(t)].unlock();
 }
 
 constexpr uint32_t kDefaultAllocatedBlocks = 1;
 
-// Check whether the inode has blocks or not
-
-// [[maybe_unused]]
-// static inline int HasBlocks(inode *inode)
-// {
-// 	if (F2FS_I(inode)->i_xattr_nid)
-// 		return (inode->i_blocks > kDefaultAllocatedBlocks + 1);
-// 	else
-// 		return (inode->i_blocks > kDefaultAllocatedBlocks);
-// }
-
 [[maybe_unused]] static inline void IncPageCount(SbInfo *sbi, int count_type) {
   // TODO: IMPL
   // AtomicInc(&sbi->nr_pages[count_type]);
diff --git a/f2fs_lib.h b/f2fs_lib.h
index 44acc5f..53f7b1b 100644
--- a/f2fs_lib.h
+++ b/f2fs_lib.h
@@ -8,10 +8,9 @@
 #include "f2fs_types.h"
 
 namespace f2fs {
-/*
- * Page cache helper
- * TODO: Need to be changed once Pager is available
- */
+
+// Page cache helper
+// TODO: Need to be changed once Pager is available
 inline Page *GrabCachePage(void *vnode, uint32_t nid, uint32_t /*TODO pgoff_t*/ index) {
   Page *page = new Page();
   page->index = index;
@@ -42,14 +41,10 @@
   // TODO: Once Pager is availabe, it could be used for wb synchronization
 }
 
-/*
- * Checkpoint
- */
+// Checkpoint
 inline int64_t VerAfter(uint64_t a, uint64_t b) { return (static_cast<int64_t>(a - b) > 0); }
 
-/*
- * CRC
- */
+// CRC
 inline unsigned int F2fsCalCrc32(unsigned int crc, void *buff, unsigned int len) {
   int i;
   unsigned char *p = static_cast<unsigned char *>(buff);
@@ -69,41 +64,15 @@
   return F2fsCrc32(buff, buff_size) == blk_crc;
 }
 
-/*
- * Error code in pointer variables
- * TODO: should remove them. there is no room for errno in Fuchsia.
- *
- */
+// Error codes in pointer variables
+// TODO: should remove these; there is no room for errno in Fuchsia.
 inline bool IsErr(const void *ptr) { return (ptr == nullptr); }
 
 inline long PtrErr(const void *ptr) { return 0; }
 
 inline void *ErrPtr(long error) { return nullptr; }
 
-/*
- * Every lock is using mutex.
- * TODO: need to find more appropriate methods for rw lock.
- */
-static inline void SpinLockInit(spinlock_t *lock) { mtx_init(lock, mtx_plain); }
-
-static inline void SpinLock(spinlock_t *lock) TA_ACQ(lock) { mtx_lock(lock); }
-
-static inline void SpinUnlock(spinlock_t *lock) TA_REL(lock) { mtx_unlock(lock); }
-
-static inline void RwlockInit(rwlock_t *lock) { mtx_init(lock, mtx_plain); }
-
-static inline void ReadLock(rwlock_t *lock) TA_ACQ(lock) { mtx_lock(lock); }
-
-static inline void WriteLock(rwlock_t *lock) TA_ACQ(lock) { mtx_lock(lock); }
-
-static inline void ReadUnlock(rwlock_t *lock) TA_REL(lock) { mtx_unlock(lock); }
-
-static inline void WriteUnlock(rwlock_t *lock) TA_REL(lock) { mtx_unlock(lock); }
-
-/*
- * Bitmap operations
- * TODO: some operations (e.g., test_and_set) requires atomicity
- */
+// Bitmap operations
 inline size_t DivRoundUp(size_t n, size_t d) { return (((n) + (d)-1) / (d)); }
 inline size_t BitsToLongs(size_t nr) { return DivRoundUp(nr, kBitsPerByte * sizeof(long)); }
 
@@ -250,9 +219,7 @@
   atomic_fetch_sub_explicit(t, 1, std::memory_order_relaxed);
 }
 
-/*
- * List operations
- */
+// List operations
 inline void list_move_tail(list_node_t *list, list_node_t *item) {
   list_delete(item);
   list_add_tail(list, item);
@@ -265,9 +232,7 @@
   list->next = item;
 }
 
-/*
- * Zero segment
- */
+// Zero segment
 static inline void ZeroUserSegments(Page *page, unsigned start1, unsigned end1, unsigned start2,
                                     unsigned end2) {
   char *data = (char *)PageAddress(page);
@@ -289,9 +254,7 @@
   ZeroUserSegments(page, start, start + size, 0, 0);
 }
 
-/*
- * Inode
- */
+// Inode
 static inline void *Igrab(void *vnode) {
   // TODO: need to add ref. count if vnode is valid
   return vnode;
diff --git a/f2fs_types.h b/f2fs_types.h
index 4d70081..b8089b6 100644
--- a/f2fs_types.h
+++ b/f2fs_types.h
@@ -23,8 +23,6 @@
 using pgoff_t = unsigned long;
 using atomic_t = std::atomic_int;
 using umode_t = uint16_t;
-using spinlock_t = mtx_t;
-using rwlock_t = mtx_t;
 // Radix Tree
 struct RadixTreeRoot {};
 
diff --git a/file.cc b/file.cc
index 2325ab5..52f34d1 100644
--- a/file.cc
+++ b/file.cc
@@ -323,7 +323,7 @@
 
   for (n = blk_start; n <= blk_end; n++) {
     bool is_empty_page = false;
-
+    fs::SharedLock read_lock(io_lock_);
     if (zx_status_t ret = GetLockDataPage(n, &data_page); ret != ZX_OK) {
       if (ret == ZX_ERR_NOT_FOUND) {  // truncated page
         is_empty_page = true;
@@ -433,6 +433,7 @@
     off_in_buf += cur_len;
     left -= cur_len;
 
+    std::lock_guard write_lock(io_lock_);
     SetSize(std::max(static_cast<size_t>(GetSize()), offset + off_in_buf));
 #if 0  // porting needed
     // set_page_dirty(data_page, Vfs());
diff --git a/namei.cc b/namei.cc
index 338f1b1..4be5915 100644
--- a/namei.cc
+++ b/namei.cc
@@ -15,7 +15,7 @@
   VnodeF2fs *vnode = nullptr;
 
   do {
-    fbl::AutoLock lock(&sbi.fs_lock[static_cast<int>(LockType::kNodeNew)]);
+    fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kNodeNew)]);
     if (!Vfs()->Nodemgr().AllocNid(&ino)) {
       Iput(vnode);
       return ZX_ERR_NO_SPACE;
@@ -51,7 +51,7 @@
   if (TestOpt(&sbi, kMountInlineDentry) && vnode->IsDir())
     vnode->SetFlag(InodeInfoFlag::kInlineDentry);
 
-  SetFlag(InodeInfoFlag::kNewInode);
+  vnode->SetFlag(InodeInfoFlag::kNewInode);
   Vfs()->InsertVnode(vnode);
   vnode->MarkInodeDirty();
 
@@ -434,7 +434,7 @@
   }
 
   do {
-    fbl::AutoLock rlock(&sbi.fs_lock[static_cast<int>(LockType::kRename)]);
+    fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kRename)]);
 
     new_entry = new_dir->FindEntry(newname, &new_page);
     if (new_entry) {
diff --git a/node.cc b/node.cc
index 8d21b9a..18f7e3c 100644
--- a/node.cc
+++ b/node.cc
@@ -24,19 +24,17 @@
   block_t valid_block_count;
   uint32_t ValidNodeCount;
 
-  SpinLock(&sbi->stat_lock);
+  fbl::AutoLock stat_lock(&sbi->stat_lock);
 
   valid_block_count = sbi->total_valid_block_count + static_cast<block_t>(count);
   sbi->alloc_valid_block_count += static_cast<block_t>(count);
   ValidNodeCount = sbi->total_valid_node_count + count;
 
   if (valid_block_count > sbi->user_block_count) {
-    SpinUnlock(&sbi->stat_lock);
     return false;
   }
 
   if (ValidNodeCount > sbi->total_node_count) {
-    SpinUnlock(&sbi->stat_lock);
     return false;
   }
 
@@ -44,7 +42,6 @@
     vnode->IncBlocks(count);
   sbi->total_valid_node_count = ValidNodeCount;
   sbi->total_valid_block_count = valid_block_count;
-  SpinUnlock(&sbi->stat_lock);
 
   return true;
 }
@@ -59,10 +56,10 @@
 
   if (nm_i->fcnt <= 0)
     return ZX_ERR_OUT_OF_RANGE;
-  SpinLock(&nm_i->free_nid_list_lock);
+
+  std::lock_guard free_nid_lock(nm_i->free_nid_list_lock);
   fnid = containerof(nm_i->free_nid_list.next, FreeNid, list);
   *nid = fnid->nid;
-  SpinUnlock(&nm_i->free_nid_list_lock);
   return ZX_OK;
 }
 
@@ -318,7 +315,7 @@
 }
 
 inline void NodeMgr::DecValidNodeCount(SbInfo *sbi, VnodeF2fs *vnode, uint32_t count) {
-  SpinLock(&sbi->stat_lock);
+  fbl::AutoLock stat_lock(&sbi->stat_lock);
 
   // TODO: IMPL
   ZX_ASSERT(!(sbi->total_valid_block_count < count));
@@ -327,8 +324,6 @@
   vnode->DecBlocks(count);
   sbi->total_valid_node_count -= count;
   sbi->total_valid_block_count -= count;
-
-  SpinUnlock(&sbi->stat_lock);
 }
 
 /*
@@ -499,11 +494,10 @@
   NatEntry *e;
   int is_cp = 1;
 
-  ReadLock(&nm_i->nat_tree_lock);
+  fs::SharedLock nat_lock(nm_i->nat_tree_lock);
   e = LookupNatCache(nm_i, nid);
   if (e && !e->checkpointed)
     is_cp = 0;
-  ReadUnlock(&nm_i->nat_tree_lock);
   return is_cp;
 }
 
@@ -532,12 +526,11 @@
 void NodeMgr::CacheNatEntry(NmInfo *nm_i, nid_t nid, RawNatEntry *ne) {
   NatEntry *e;
 retry:
-  WriteLock(&nm_i->nat_tree_lock);
+  std::lock_guard lock(nm_i->nat_tree_lock);
   e = LookupNatCache(nm_i, nid);
   if (!e) {
     e = GrabNatEntry(nm_i, nid);
     if (!e) {
-      WriteUnlock(&nm_i->nat_tree_lock);
       goto retry;
     }
     NatSetBlkaddr(e, LeToCpu(ne->block_addr));
@@ -545,7 +538,6 @@
     NatSetVersion(e, ne->version);
     e->checkpointed = true;
   }
-  WriteUnlock(&nm_i->nat_tree_lock);
 }
 
 void NodeMgr::SetNodeAddr(NodeInfo *ni, block_t new_blkaddr) {
@@ -553,12 +545,11 @@
   NmInfo *nm_i = GetNmInfo(&sbi);
   NatEntry *e;
 retry:
-  WriteLock(&nm_i->nat_tree_lock);
+  std::lock_guard nat_lock(nm_i->nat_tree_lock);
   e = LookupNatCache(nm_i, ni->nid);
   if (!e) {
     e = GrabNatEntry(nm_i, ni->nid);
     if (!e) {
-      WriteUnlock(&nm_i->nat_tree_lock);
       goto retry;
     }
     e->ni = *ni;
@@ -593,7 +584,6 @@
   /* change address */
   NatSetBlkaddr(e, new_blkaddr);
   SetNatCacheDirty(nm_i, e);
-  WriteUnlock(&nm_i->nat_tree_lock);
 }
 
 int NodeMgr::TryToFreeNats(int nr_shrink) {
@@ -603,7 +593,7 @@
   if (nm_i->nat_cnt < 2 * kNmWoutThreshold)
     return 0;
 
-  WriteLock(&nm_i->nat_tree_lock);
+  std::lock_guard nat_lock(nm_i->nat_tree_lock);
   while (nr_shrink && !list_is_empty(&nm_i->nat_entries)) {
     NatEntry *ne;
     // ne = list_first_entry(&nm_i->nat_entries,
@@ -612,7 +602,6 @@
     DelFromNatCache(nm_i, ne);
     nr_shrink--;
   }
-  WriteUnlock(&nm_i->nat_tree_lock);
   return nr_shrink;
 }
 
@@ -633,38 +622,36 @@
 
   ni->nid = nid;
 
-  /* Check nat cache */
-  ReadLock(&nm_i->nat_tree_lock);
-  e = LookupNatCache(nm_i, nid);
-  if (e) {
-    ni->ino = NatGetIno(e);
-    ni->blk_addr = NatGetBlkaddr(e);
-    ni->version = NatGetVersion(e);
+  {
+    /* Check nat cache */
+    fs::SharedLock nat_lock(nm_i->nat_tree_lock);
+    e = LookupNatCache(nm_i, nid);
+    if (e) {
+      ni->ino = NatGetIno(e);
+      ni->blk_addr = NatGetBlkaddr(e);
+      ni->version = NatGetVersion(e);
+      return;
+    }
   }
-  ReadUnlock(&nm_i->nat_tree_lock);
-  if (e)
-    return;
 
-  /* Check current segment summary */
-  mtx_lock(&curseg->curseg_mutex);
-  i = SegMgr::LookupJournalInCursum(sum, JournalType::kNatJournal, nid, 0);
-  if (i >= 0) {
-    ne = NatInJournal(sum, i);
+  {
+    /* Check current segment summary */
+    fbl::AutoLock curseg_lock(&curseg->curseg_mutex);
+    i = SegMgr::LookupJournalInCursum(sum, JournalType::kNatJournal, nid, 0);
+    if (i >= 0) {
+      ne = NatInJournal(sum, i);
+      NodeInfoFromRawNat(ni, &ne);
+    }
+  }
+  if (i < 0) {
+    /* Fill NodeInfo from nat page */
+    page = GetCurrentNatPage(start_nid);
+    nat_blk = static_cast<NatBlock *>(PageAddress(page));
+    ne = nat_blk->entries[nid - start_nid];
+
     NodeInfoFromRawNat(ni, &ne);
+    F2fsPutPage(page, 1);
   }
-  mtx_unlock(&curseg->curseg_mutex);
-  if (i >= 0)
-    goto cache;
-
-  /* Fill NodeInfo from nat page */
-  page = GetCurrentNatPage(start_nid);
-  nat_blk = static_cast<NatBlock *>(PageAddress(page));
-  ne = nat_blk->entries[nid - start_nid];
-
-  NodeInfoFromRawNat(ni, &ne);
-  F2fsPutPage(page, 1);
-
-cache:
   /* cache nat entry */
   CacheNatEntry(GetNmInfo(&sbi), nid, &ne);
 }
@@ -743,9 +730,7 @@
   return zx::ok(level);
 }
 
-/*
- * Caller should call f2fs_put_dnode(dn).
- */
+// Caller should call f2fs_put_dnode(dn).
 zx_status_t NodeMgr::GetDnodeOfData(DnodeOfData *dn, pgoff_t index, int ro) {
   SbInfo &sbi = fs_->GetSbInfo();
   Page *npage[4];
@@ -778,11 +763,10 @@
     bool done = false;
 
     if (!nids[i] && !ro) {
-      mutex_lock_op(&sbi, LockType::kNodeNew);
+      fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kNodeNew)]);
 
       /* alloc new node */
       if (!AllocNid(&(nids[i]))) {
-        mutex_unlock_op(&sbi, LockType::kNodeNew);
         err = ZX_ERR_NO_SPACE;
         goto release_pages;
       }
@@ -792,13 +776,11 @@
       err = NewNodePage(dn, noffset[i], &npage[i]);
       if (err) {
         AllocNidFailed(nids[i]);
-        mutex_unlock_op(&sbi, LockType::kNodeNew);
         goto release_pages;
       }
 
       SetNid(parent, offset[i - 1], nids[i], i == 1);
       AllocNidDone(nids[i]);
-      mutex_unlock_op(&sbi, LockType::kNodeNew);
       done = true;
     } else if (ro && i == level && level > 1) {
 #if 0  // porting needed
@@ -1127,10 +1109,9 @@
   DnodeOfData dn;
   zx_status_t err = 0;
 
-  mutex_lock_op(&sbi, LockType::kNodeTrunc);
+  fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kNodeTrunc)]);
   err = GetNodePage(ino, &page);
   if (err) {
-    mutex_unlock_op(&sbi, LockType::kNodeTrunc);
     return err;
   }
 
@@ -1139,7 +1120,6 @@
     err = GetNodePage(nid, &npage);
 
     if (err) {
-      mutex_unlock_op(&sbi, LockType::kNodeTrunc);
       return err;
     }
 
@@ -1163,7 +1143,6 @@
   } else {
     ZX_ASSERT(0);
   }
-  mutex_unlock_op(&sbi, LockType::kNodeTrunc);
   return ZX_OK;
 }
 
@@ -1175,10 +1154,11 @@
 
   /* allocate inode page for new inode */
   SetNewDnode(&dn, child, nullptr, nullptr, child->Ino());
-  mutex_lock_op(&sbi, LockType::kNodeNew);
-  err = NewNodePage(&dn, 0, &page);
-  parent->InitDentInode(child, page);
-  mutex_unlock_op(&sbi, LockType::kNodeNew);
+  {
+    fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kNodeNew)]);
+    err = NewNodePage(&dn, 0, &page);
+    parent->InitDentInode(child, page);
+  }
   if (err)
     return err;
   F2fsPutPage(page, 1);
@@ -1460,7 +1440,7 @@
 #endif
   WaitOnPageWriteback(page);
 
-  mutex_lock_op(&sbi, LockType::kNodeWrite);
+  fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kNodeWrite)]);
 
   /* get old block addr of this node page */
   nid = NidOfNode(page);
@@ -1471,7 +1451,6 @@
 
   /* This page is already truncated */
   if (ni.blk_addr == kNullAddr) {
-    mutex_unlock_op(&sbi, LockType::kNodeWrite);
     return ZX_OK;
   }
 
@@ -1482,7 +1461,6 @@
   SetNodeAddr(&ni, new_addr);
   DecPageCount(&sbi, CountType::kDirtyNodes);
 
-  mutex_unlock_op(&sbi, LockType::kNodeWrite);
   // TODO: IMPL
   // unlock_page(page);
   return ZX_OK;
@@ -1587,9 +1565,8 @@
   i->nid = nid;
   i->state = static_cast<int>(NidState::kNidNew);
 
-  SpinLock(&nm_i->free_nid_list_lock);
+  std::lock_guard free_nid_lock(nm_i->free_nid_list_lock);
   if (LookupFreeNidList(nid, &nm_i->free_nid_list)) {
-    SpinUnlock(&nm_i->free_nid_list_lock);
 #if 0  // porting needed
     // kmem_cache_free(free_nid_slab, i);
 #endif
@@ -1598,19 +1575,17 @@
   }
   list_add_tail(&nm_i->free_nid_list, &i->list);
   nm_i->fcnt++;
-  SpinUnlock(&nm_i->free_nid_list_lock);
   return 1;
 }
 
 void NodeMgr::RemoveFreeNid(NmInfo *nm_i, nid_t nid) {
   FreeNid *i;
-  SpinLock(&nm_i->free_nid_list_lock);
+  std::lock_guard free_nid_lock(nm_i->free_nid_list_lock);
   i = LookupFreeNidList(nid, &nm_i->free_nid_list);
   if (i && i->state == static_cast<int>(NidState::kNidNew)) {
     DelFromFreeNidList(i);
     nm_i->fcnt--;
   }
-  SpinUnlock(&nm_i->free_nid_list_lock);
 }
 
 int NodeMgr::ScanNatPage(NmInfo *nm_i, Page *nat_page, nid_t start_nid) {
@@ -1670,28 +1645,28 @@
 
   nm_i->next_scan_nid = nid;
 
-  /* find free nids from current sum_pages */
-  mtx_lock(&curseg->curseg_mutex);
-  for (i = 0; i < NatsInCursum(sum); i++) {
-    block_t addr = LeToCpu(NatInJournal(sum, i).block_addr);
-    nid = LeToCpu(NidInJournal(sum, i));
-    if (addr == kNullAddr) {
-      AddFreeNid(nm_i, nid);
-    } else {
-      RemoveFreeNid(nm_i, nid);
+  {
+    // find free nids from current sum_pages
+    fbl::AutoLock curseg_lock(&curseg->curseg_mutex);
+    for (i = 0; i < NatsInCursum(sum); i++) {
+      block_t addr = LeToCpu(NatInJournal(sum, i).block_addr);
+      nid = LeToCpu(NidInJournal(sum, i));
+      if (addr == kNullAddr) {
+        AddFreeNid(nm_i, nid);
+      } else {
+        RemoveFreeNid(nm_i, nid);
+      }
     }
   }
-  mtx_unlock(&curseg->curseg_mutex);
 
   /* remove the free nids from current allocated nids */
   list_for_every_entry_safe (&nm_i->free_nid_list, fnid, next_fnid, FreeNid, list) {
     NatEntry *ne;
 
-    ReadLock(&nm_i->nat_tree_lock);
+    fs::SharedLock lock(nm_i->nat_tree_lock);
     ne = LookupNatCache(nm_i, fnid->nid);
     if (ne && NatGetBlkaddr(ne) != kNullAddr)
       RemoveFreeNid(nm_i, fnid->nid);
-    ReadUnlock(&nm_i->nat_tree_lock);
   }
 }
 
@@ -1705,29 +1680,27 @@
   NmInfo *nm_i = GetNmInfo(&sbi);
   FreeNid *i = nullptr;
   list_node_t *this_list;
-retry:
-  mtx_lock(&nm_i->build_lock);
+retry : {
+  fbl::AutoLock lock(&nm_i->build_lock);
   if (!nm_i->fcnt) {
     /* scan NAT in order to build free nid list */
     BuildFreeNids();
     if (!nm_i->fcnt) {
-      mtx_unlock(&nm_i->build_lock);
       return false;
     }
   }
-  mtx_unlock(&nm_i->build_lock);
+}
 
   /*
    * We check fcnt again since previous check is racy as
    * we didn't hold free_nid_list_lock. So other thread
    * could consume all of free nids.
    */
-  SpinLock(&nm_i->free_nid_list_lock);
   if (!nm_i->fcnt) {
-    SpinUnlock(&nm_i->free_nid_list_lock);
     goto retry;
   }
 
+  std::lock_guard lock(nm_i->free_nid_list_lock);
   ZX_ASSERT(!list_is_empty(&nm_i->free_nid_list));
 
   list_for_every(&nm_i->free_nid_list, this_list) {
@@ -1740,7 +1713,6 @@
   *nid = i->nid;
   i->state = static_cast<int>(NidState::kNidAlloc);
   nm_i->fcnt--;
-  SpinUnlock(&nm_i->free_nid_list_lock);
   return true;
 }
 
@@ -1752,13 +1724,12 @@
   NmInfo *nm_i = GetNmInfo(&sbi);
   FreeNid *i;
 
-  SpinLock(&nm_i->free_nid_list_lock);
+  std::lock_guard free_nid_lock(nm_i->free_nid_list_lock);
   i = LookupFreeNidList(nid, &nm_i->free_nid_list);
   if (i) {
     ZX_ASSERT(i->state == static_cast<int>(NidState::kNidAlloc));
     DelFromFreeNidList(i);
   }
-  SpinUnlock(&nm_i->free_nid_list_lock);
 }
 
 /**
@@ -1875,15 +1846,15 @@
   SummaryBlock *sum = curseg->sum_blk;
   int i;
 
-  fbl::AutoLock lock(&curseg->curseg_mutex);
+  fbl::AutoLock curseg_lock(&curseg->curseg_mutex);
 
-  ReadLock(&nm_i->nat_tree_lock);
-  size_t dirty_nat_cnt = list_length(&nm_i->dirty_nat_entries);
-  if ((NatsInCursum(sum) + dirty_nat_cnt) <= kNatJournalEntries) {
-    ReadUnlock(&nm_i->nat_tree_lock);
-    return false;
+  {
+    fs::SharedLock nat_lock(nm_i->nat_tree_lock);
+    size_t dirty_nat_cnt = list_length(&nm_i->dirty_nat_entries);
+    if ((NatsInCursum(sum) + dirty_nat_cnt) <= kNatJournalEntries) {
+      return false;
+    }
   }
-  ReadUnlock(&nm_i->nat_tree_lock);
 
   for (i = 0; i < NatsInCursum(sum); i++) {
     NatEntry *ne = nullptr;
@@ -1891,22 +1862,19 @@
     nid_t nid = LeToCpu(NidInJournal(sum, i));
 
     while (!ne) {
-      WriteLock(&nm_i->nat_tree_lock);
+      std::lock_guard nat_lock(nm_i->nat_tree_lock);
       ne = LookupNatCache(nm_i, nid);
       if (ne) {
         SetNatCacheDirty(nm_i, ne);
-        WriteUnlock(&nm_i->nat_tree_lock);
       } else {
         ne = GrabNatEntry(nm_i, nid);
         if (!ne) {
-          WriteUnlock(&nm_i->nat_tree_lock);
           continue;
         }
         NatSetBlkaddr(ne, LeToCpu(raw_ne.block_addr));
         NatSetIno(ne, LeToCpu(raw_ne.ino));
         NatSetVersion(ne, raw_ne.version);
         SetNatCacheDirty(nm_i, ne);
-        WriteUnlock(&nm_i->nat_tree_lock);
       }
     }
   }
@@ -1931,7 +1899,7 @@
 #if 0  // porting needed
   //	if (!flushed)
 #endif
-  fbl::AutoLock lock(&curseg->curseg_mutex);
+  fbl::AutoLock curseg_lock(&curseg->curseg_mutex);
 
   // 1) flush dirty nat caches
   list_for_every_safe(&nm_i->dirty_nat_entries, cur, n) {
@@ -1993,17 +1961,17 @@
     }
 
     if (NatGetBlkaddr(ne) == kNullAddr) {
-      WriteLock(&nm_i->nat_tree_lock);
-      DelFromNatCache(nm_i, ne);
-      WriteUnlock(&nm_i->nat_tree_lock);
+      {
+        std::lock_guard nat_lock(nm_i->nat_tree_lock);
+        DelFromNatCache(nm_i, ne);
+      }
 
       // We can reuse this freed nid at this point
       AddFreeNid(GetNmInfo(&sbi), nid);
     } else {
-      WriteLock(&nm_i->nat_tree_lock);
+      std::lock_guard nat_lock(nm_i->nat_tree_lock);
       ClearNatCacheDirty(nm_i, ne);
       ne->checkpointed = true;
-      WriteUnlock(&nm_i->nat_tree_lock);
     }
   }
 #if 0  // porting needed
@@ -2042,10 +2010,6 @@
   list_initialize(&nm_i->nat_entries);
   list_initialize(&nm_i->dirty_nat_entries);
 
-  mtx_init(&nm_i->build_lock, mtx_plain);
-  SpinLockInit(&nm_i->free_nid_list_lock);
-  RwlockInit(&nm_i->nat_tree_lock);
-
   nm_i->bitmap_size = BitmapSize(&sbi, MetaBitmap::kNatBitmap);
   nm_i->init_scan_nid = LeToCpu(sbi.ckpt->next_free_nid);
   nm_i->next_scan_nid = LeToCpu(sbi.ckpt->next_free_nid);
@@ -2094,18 +2058,17 @@
   if (!nm_i)
     return;
 
-  /* destroy free nid list */
-  SpinLock(&nm_i->free_nid_list_lock);
+  // destroy free nid list
+  std::lock_guard free_nid_lock(nm_i->free_nid_list_lock);
   list_for_every_entry_safe (&nm_i->free_nid_list, i, next_i, FreeNid, list) {
     ZX_ASSERT(i->state != static_cast<int>(NidState::kNidAlloc));
     DelFromFreeNidList(i);
     nm_i->fcnt--;
   }
   ZX_ASSERT(!nm_i->fcnt);
-  SpinUnlock(&nm_i->free_nid_list_lock);
 
   /* destroy nat cache */
-  WriteLock(&nm_i->nat_tree_lock);
+  std::lock_guard nat_lock(nm_i->nat_tree_lock);
   while ((found = GangLookupNatCache(nm_i, nid, kNatvecSize, natvec))) {
     uint32_t idx;
     for (idx = 0; idx < found; idx++) {
@@ -2116,7 +2079,6 @@
   }
   // TODO: Check nm_i->nat_cnt
   // ZX_ASSERT(!nm_i->nat_cnt);
-  WriteUnlock(&nm_i->nat_tree_lock);
 
   delete[] nm_i->nat_bitmap;
   delete[] nm_i->nat_prev_bitmap;
diff --git a/query.cc b/query.cc
index e66ccf2..8aa265c 100644
--- a/query.cc
+++ b/query.cc
@@ -68,8 +68,8 @@
   char name_buf[fuchsia_io2::wire::kMaxPathLength];
   if (request->query & FilesystemInfoQuery::kDevicePath) {
     size_t name_len;
-    zx_status_t status =
-        f2fs_->bc_->device()->GetDevicePath(fuchsia_io2::wire::kMaxPathLength, name_buf, &name_len);
+    zx_status_t status = f2fs_->GetBc().device()->GetDevicePath(fuchsia_io2::wire::kMaxPathLength,
+                                                                name_buf, &name_len);
     if (status != ZX_OK) {
       completer.ReplyError(status);
       return;
diff --git a/segment.cc b/segment.cc
index d208195..f8db681 100644
--- a/segment.cc
+++ b/segment.cc
@@ -47,9 +47,8 @@
 
 inline uint32_t SegMgr::FindNextInuse(FreeSegmapInfo *free_i, uint32_t max, uint32_t segno) {
   uint32_t ret;
-  ReadLock(&free_i->segmap_lock);
+  fs::SharedLock segmap_lock(free_i->segmap_lock);
   ret = find_next_bit_le(free_i->free_segmap, max, segno);
-  ReadUnlock(&free_i->segmap_lock);
   return ret;
 }
 
@@ -60,7 +59,7 @@
   uint32_t start_segno = secno * sbi.segs_per_sec;
   uint32_t next;
 
-  WriteLock(&free_i->segmap_lock);
+  std::lock_guard segmap_lock(free_i->segmap_lock);
   clear_bit(segno, free_i->free_segmap);
   free_i->free_segments++;
 
@@ -69,7 +68,6 @@
     clear_bit(secno, free_i->free_secmap);
     free_i->free_sections++;
   }
-  WriteUnlock(&free_i->segmap_lock);
 }
 
 inline void SegMgr::SetInuse(uint32_t segno) {
@@ -89,7 +87,7 @@
   uint32_t start_segno = secno * sbi.segs_per_sec;
   uint32_t next;
 
-  WriteLock(&free_i->segmap_lock);
+  std::lock_guard segmap_lock(free_i->segmap_lock);
   if (test_and_clear_bit(segno, free_i->free_segmap)) {
     free_i->free_segments++;
 
@@ -99,20 +97,18 @@
         free_i->free_sections++;
     }
   }
-  WriteUnlock(&free_i->segmap_lock);
 }
 
 inline void SegMgr::SetTestAndInuse(uint32_t segno) {
   SbInfo &sbi = fs_->GetSbInfo();
   FreeSegmapInfo *free_i = GetFreeInfo(&sbi);
   uint32_t secno = segno / sbi.segs_per_sec;
-  WriteLock(&free_i->segmap_lock);
+  std::lock_guard segmap_lock(free_i->segmap_lock);
   if (!test_and_set_bit(segno, free_i->free_segmap)) {
     free_i->free_segments--;
     if (!test_and_set_bit(secno, free_i->free_secmap))
       free_i->free_sections--;
   }
-  WriteUnlock(&free_i->segmap_lock);
 }
 
 void SegMgr::GetSitBitmap(void *dst_addr) {
@@ -140,9 +136,8 @@
   FreeSegmapInfo *free_i = GetFreeInfo(&sbi);
   uint32_t free_segs;
 
-  ReadLock(&free_i->segmap_lock);
+  fs::SharedLock segmap_lock(free_i->segmap_lock);
   free_segs = free_i->free_segments;
-  ReadUnlock(&free_i->segmap_lock);
 
   return free_segs;
 }
@@ -157,9 +152,8 @@
   FreeSegmapInfo *free_i = GetFreeInfo(&sbi);
   uint32_t free_secs;
 
-  ReadLock(&free_i->segmap_lock);
+  fs::SharedLock segmap_lock(free_i->segmap_lock);
   free_secs = free_i->free_sections;
-  ReadUnlock(&free_i->segmap_lock);
 
   return free_secs;
 }
@@ -508,7 +502,7 @@
       dirty_i->nr_dirty[static_cast<int>(DirtyType::kPre)]--;
 
     if (TestOpt(&sbi, kMountDiscard))
-      fs_->bc_->Trim(StartBlock(&sbi, segno), (1 << sbi.log_blocks_per_seg));
+      fs_->GetBc().Trim(StartBlock(&sbi, segno), (1 << sbi.log_blocks_per_seg));
   }
 }
 
@@ -658,7 +652,7 @@
   int i;
   bool got_it = false;
 
-  WriteLock(&free_i->segmap_lock);
+  std::lock_guard segmap_lock(free_i->segmap_lock);
 
   auto find_other_zone = [&]() -> bool {
     secno = find_next_zero_bit(free_i->free_secmap, total_secs, hint);
@@ -743,7 +737,6 @@
   ZX_ASSERT(!test_bit(segno, free_i->free_segmap));
   SetInuse(segno);
   *newseg = segno;
-  WriteUnlock(&free_i->segmap_lock);
 }
 
 void SegMgr::ResetCurseg(CursegType type, int modified) {
@@ -983,7 +976,7 @@
 #endif
 
 void SegMgr::SubmitWritePage(Page *page, block_t blk_addr, PageType type) {
-  zx_status_t ret = fs_->bc_->Writeblk(blk_addr, page->data);
+  zx_status_t ret = fs_->GetBc().Writeblk(blk_addr, page->data);
   if (ret) {
     FX_LOGS(ERROR) << "SubmitWritePage error " << ret;
   }
@@ -1714,7 +1707,6 @@
   sit_i->sents_per_block = kSitEntryPerBlock;
   sit_i->elapsed_time = LeToCpu(sbi.ckpt->elapsed_time);
   sit_i->mounted_time = cur_time;
-  mtx_init(&sit_i->sentry_lock, mtx_plain);
   return ZX_OK;
 }
 
@@ -1752,7 +1744,6 @@
   free_i->free_segments = 0;
   free_i->free_sections = 0;
 
-  RwlockInit(&free_i->segmap_lock);
   return ZX_OK;
 }
 
@@ -1768,7 +1759,6 @@
   GetSmInfo(&sbi)->curseg_array = array;
 
   for (i = 0; i < kNrCursegType; i++) {
-    mtx_init(&array[i].curseg_mutex, mtx_plain);
     array[i].sum_blk = static_cast<SummaryBlock *>(malloc(kPageCacheSize));
     memset(array[i].sum_blk, 0, kPageCacheSize);
     if (!array[i].sum_blk)
@@ -1894,7 +1884,6 @@
     return ZX_ERR_NO_MEMORY;
 
   GetSmInfo(&sbi)->dirty_info = dirty_i;
-  mtx_init(&dirty_i->seglist_lock, mtx_plain);
 
   bitmap_size = BitmapSize(TotalSegs(&sbi));
 
@@ -1949,8 +1938,6 @@
   // init sm info
   sbi.sm_info = sm_info;
 
-  list_initialize(&sm_info->wblist_head);
-  SpinLockInit(&sm_info->wblist_lock);
   sm_info->seg0_blkaddr = LeToCpu(raw_super->segment0_blkaddr);
   sm_info->main_blkaddr = LeToCpu(raw_super->main_blkaddr);
   sm_info->segment_count = LeToCpu(raw_super->segment_count);
diff --git a/segment.h b/segment.h
index 2050130..9027012 100644
--- a/segment.h
+++ b/segment.h
@@ -92,7 +92,7 @@
   uint64_t *dirty_sentries_bitmap = nullptr; /* bitmap for dirty sentries */
   uint32_t dirty_sentries = 0;               /* # of dirty sentries */
   uint32_t sents_per_block = 0;              /* # of SIT entries per block */
-  mtx_t sentry_lock;                         /* to protect SIT cache */
+  fbl::Mutex sentry_lock;                    /* to protect SIT cache */
   SegEntry *sentries = nullptr;              /* SIT segment-level cache */
   SecEntry *sec_entries = nullptr;           /* SIT section-level cache */
 
@@ -107,7 +107,7 @@
   uint32_t start_segno = 0;        /* start segment number logically */
   uint32_t free_segments = 0;      /* # of free segments */
   uint32_t free_sections = 0;      /* # of free sections */
-  rwlock_t segmap_lock;            /* free segmap lock */
+  fs::SharedMutex segmap_lock;     /* free segmap lock */
   uint64_t *free_segmap = nullptr; /* free segment bitmap */
   uint64_t *free_secmap = nullptr; /* free section bitmap */
 };
@@ -133,14 +133,14 @@
 struct DirtySeglistInfo {
   const VictimSelection *v_ops = nullptr; /* victim selction operation */
   uint64_t *dirty_segmap[static_cast<int>(DirtyType::kNrDirtytype)] = {};
-  mtx_t seglist_lock;                                           /* lock for segment bitmaps */
+  fbl::Mutex seglist_lock;                                      /* lock for segment bitmaps */
   int nr_dirty[static_cast<int>(DirtyType::kNrDirtytype)] = {}; /* # of dirty segments */
   uint64_t *victim_segmap[2] = {};                              /* BG_GC, FG_GC */
 };
 
 /* for active log information */
 struct CursegInfo {
-  mtx_t curseg_mutex;              /* lock for consistency */
+  fbl::Mutex curseg_mutex;         /* lock for consistency */
   SummaryBlock *sum_blk = nullptr; /* cached summary block */
   uint8_t alloc_type = 0;          /* current allocation type */
   uint32_t segno = 0;              /* current segment number */
diff --git a/super.cc b/super.cc
index bffe834..448237c 100644
--- a/super.cc
+++ b/super.cc
@@ -301,7 +301,6 @@
   //   sbi_->raw_super_buf = raw_super_buf;
 #endif
   sbi_->por_doing = 0;
-  SpinLockInit(&sbi_->stat_lock);
 #if 0  // porting needed
   // init_rwsem(&sbi->bio_sem);
 #endif
@@ -331,7 +330,6 @@
   sbi_->last_valid_block_count = sbi_->total_valid_block_count;
   sbi_->alloc_valid_block_count = 0;
   list_initialize(&sbi_->dir_inode_list);
-  SpinLockInit(&sbi_->dir_inode_lock);
 
   /* init super block */
 #if 0  // porting needed
@@ -437,53 +435,4 @@
   return err;
 }
 
-#if 0  // porting needed
-// dentry *F2fs::F2fsMount(file_system_type *fs_type, int flags,
-//       const char *dev_name, void *data)
-// {
-// //   return mount_bdev(fs_type, flags, dev_name, data, F2fs::FillSuper);
-//   return mount_bdev(fs_type, flags, dev_name, data, NULL);
-// }
-
-// int F2fs::InitInodecache(void) {
-//   Inode_cachep = KmemCacheCreate("Inode_cache",
-//       sizeof(InodeInfo), NULL);
-//   if (Inode_cachep == NULL)
-//     return -ENOMEM;
-// }
-
-// void F2fs::DestroyInodecache(void) {
-//   /*
-//    * Make sure all delayed rcu free inodes are flushed before we
-//    * destroy cache.
-//    */
-//   rcu_barrier();
-//   kmem_cache_destroy(Inode_cachep);
-// }
-
-// int /*__init*/ F2fs::initF2fsFs(void)
-// {
-//   int err;
-
-//   err = InitInodecache();
-//   if (err)
-//     goto fail;
-//   // TODO(unknown): should decide how to use slab cache before it
-//   //err = CreateNodeManagerCaches();
-//   if (err)
-//     goto fail;
-//   return register_filesystem(&f2fs_fs_type);
-// fail:
-//   return err;
-// }
-
-// void /*__exit*/ F2fs::exitF2fsFs(void)
-// {
-//   unregister_filesystem(&f2fs_fs_type);
-//   // TODO(unknown): should decide how to use slab cache before it
-//   //DestroyNodeManagerCaches();
-//   DestroyInodecache();
-// }
-#endif
-
 }  // namespace f2fs
diff --git a/test/unit/segment.cc b/test/unit/segment.cc
index 62a037e..d6a5ebe 100644
--- a/test/unit/segment.cc
+++ b/test/unit/segment.cc
@@ -51,7 +51,7 @@
     fs->Segmgr().WriteNodePage(root_node_page, RootIno(&sbi), old_addr, &alloc_addr);
     blk_chain.push_back(alloc_addr);
     ASSERT_NE(alloc_addr, kNullAddr);
-    ASSERT_EQ(fs->bc_->Readblk(blk_chain[i], read_page->data), ZX_OK);
+    ASSERT_EQ(fs->GetBc().Readblk(blk_chain[i], read_page->data), ZX_OK);
     ASSERT_EQ(alloc_addr, fs->Nodemgr().NextBlkaddrOfNode(read_page));
     F2fsPutPage(read_page, 0);
   }
diff --git a/test/unit/unit_lib.cc b/test/unit/unit_lib.cc
index b4781e8..0e6004e 100644
--- a/test/unit/unit_lib.cc
+++ b/test/unit/unit_lib.cc
@@ -32,7 +32,7 @@
 
 void Unmount(std::unique_ptr<F2fs> fs, std::unique_ptr<Bcache> *bc) {
   fs->PutSuper();
-  *bc = std::move(fs->bc_);
+  fs->ResetBc(bc);
   fs.reset();
 }
 
@@ -210,7 +210,7 @@
   SbInfo &sbi = fs->GetSbInfo();
   NmInfo *nm_i = GetNmInfo(&sbi);
 
-  fbl::AutoLock lock(&nm_i->free_nid_list_lock);
+  std::lock_guard lock(nm_i->free_nid_list_lock);
   for (auto nid : nids) {
     bool found = false;
     list_node_t *iter;
@@ -229,7 +229,7 @@
   SbInfo &sbi = fs->GetSbInfo();
   NmInfo *nm_i = GetNmInfo(&sbi);
 
-  fbl::AutoLock lock(&nm_i->free_nid_list_lock);
+  std::lock_guard lock(nm_i->free_nid_list_lock);
   for (auto nid : nids) {
     bool found = false;
     list_node_t *iter;
diff --git a/vnode.cc b/vnode.cc
index 4fa48c2..0e461ac 100644
--- a/vnode.cc
+++ b/vnode.cc
@@ -413,37 +413,37 @@
 
   pgoff_t free_from = static_cast<pgoff_t>((from + blocksize - 1) >> (sbi.log_blocksize));
 
-  mutex_lock_op(&sbi, LockType::kDataTrunc);
+  {
+    fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kDataTrunc)]);
+    std::lock_guard write_lock(io_lock_);
 
-  do {
-    SetNewDnode(&dn, this, nullptr, nullptr, 0);
-    err = Vfs()->Nodemgr().GetDnodeOfData(&dn, free_from, kRdOnlyNode);
-    if (err) {
-      if (err == ZX_ERR_NOT_FOUND)
-        break;
-      mutex_unlock_op(&sbi, LockType::kDataTrunc);
-      return err;
-    }
+    do {
+      SetNewDnode(&dn, this, nullptr, nullptr, 0);
+      err = Vfs()->Nodemgr().GetDnodeOfData(&dn, free_from, kRdOnlyNode);
+      if (err) {
+        if (err == ZX_ERR_NOT_FOUND)
+          break;
+        return err;
+      }
 
-    if (IsInode(dn.node_page))
-      count = kAddrsPerInode;
-    else
-      count = kAddrsPerBlock;
+      if (IsInode(dn.node_page))
+        count = kAddrsPerInode;
+      else
+        count = kAddrsPerBlock;
 
-    count -= dn.ofs_in_node;
-    ZX_ASSERT(count >= 0);
-    if (dn.ofs_in_node || IsInode(dn.node_page)) {
-      TruncateDataBlocksRange(&dn, count);
-      free_from += count;
-    }
+      count -= dn.ofs_in_node;
+      ZX_ASSERT(count >= 0);
+      if (dn.ofs_in_node || IsInode(dn.node_page)) {
+        TruncateDataBlocksRange(&dn, count);
+        free_from += count;
+      }
 
-    F2fsPutDnode(&dn);
-  } while (false);
+      F2fsPutDnode(&dn);
+    } while (false);
 
-  err = Vfs()->Nodemgr().TruncateInodeBlocks(this, free_from);
-  mutex_unlock_op(&sbi, LockType::kDataTrunc);
-
-  /* lastly zero out the first data page */
+    err = Vfs()->Nodemgr().TruncateInodeBlocks(this, free_from);
+  }
+  // lastly zero out the first data page
   TruncatePartialDataPage(from);
 
   return err;
@@ -456,7 +456,7 @@
     DnodeOfData dn;
     SbInfo &sbi = Vfs()->GetSbInfo();
 
-    fbl::AutoLock lock(&sbi.fs_lock[static_cast<int>(LockType::kDataTrunc)]);
+    fs::SharedLock rlock(sbi.fs_lock[static_cast<int>(LockType::kDataTrunc)]);
     SetNewDnode(&dn, this, NULL, NULL, 0);
     if (zx_status_t err = Vfs()->Nodemgr().GetDnodeOfData(&dn, index, kRdOnlyNode); err != ZX_OK) {
       if (err == ZX_ERR_NOT_FOUND)
@@ -517,7 +517,6 @@
 #endif
   AtomicSet(&fi_.dirty_dents, 0);
   SetCurDirDepth(1);
-  RwlockInit(&fi_.ext.ext_lock);
   SetFlag(InodeInfoFlag::kInit);
   Activate();
 }
@@ -664,19 +663,17 @@
 }
 
 inline void VnodeF2fs::GetExtentInfo(const Extent &i_ext) {
-  WriteLock(&fi_.ext.ext_lock);
+  std::lock_guard lock(fi_.ext.ext_lock);
   fi_.ext.fofs = LeToCpu(i_ext.fofs);
   fi_.ext.blk_addr = LeToCpu(i_ext.blk_addr);
   fi_.ext.len = LeToCpu(i_ext.len);
-  WriteUnlock(&fi_.ext.ext_lock);
 }
 
 inline void VnodeF2fs::SetRawExtent(Extent &i_ext) {
-  ReadLock(&fi_.ext.ext_lock);
+  fs::SharedLock lock(fi_.ext.ext_lock);
   i_ext.fofs = CpuToLe(fi_.ext.fofs);
   i_ext.blk_addr = CpuToLe(fi_.ext.blk_addr);
   i_ext.len = CpuToLe(fi_.ext.len);
-  ReadUnlock(&fi_.ext.ext_lock);
 }
 
 }  // namespace f2fs
diff --git a/vnode.h b/vnode.h
index 60de4d4..18c30c3 100644
--- a/vnode.h
+++ b/vnode.h
@@ -294,6 +294,7 @@
  protected:
   void RecycleNode() override;
   std::condition_variable_any flag_cvar_{};
+  fs::SharedMutex io_lock_;
 
  private:
   zx_status_t OpenNode(ValidatedOptions options, fbl::RefPtr<Vnode> *out_redirect) final