// blob: 8ff85ab9711c3b1efe8260a80c0c4704f2d49144
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <sys/stat.h>
#include <string.h>
#include "f2fs.h"
namespace f2fs {
#if 0 // porting needed
// static kmem_cache *f2fs_inode_cachep;
// enum {
// Opt_gc_background_off,
// Opt_disable_roll_forward,
// Opt_discard,
// Opt_noheap,
// Opt_nouser_xattr,
// Opt_noacl,
// Opt_active_logs,
// Opt_disable_ext_identify,
// Opt_err,
// };
// static match_table_t f2fs_tokens = {
// {Opt_gc_background_off, "background_gc_off"},
// {Opt_disable_roll_forward, "disable_roll_forward"},
// {Opt_discard, "discard"},
// {Opt_noheap, "no_heap"},
// {Opt_nouser_xattr, "nouser_xattr"},
// {Opt_noacl, "noacl"},
// {Opt_active_logs, "active_logs=%u"},
// {Opt_disable_ext_identify, "disable_ext_identify"},
// {Opt_err, NULL},
// };
// void F2fs::InitOnce(void *foo)
// {
// f2fs_inode_info *fi = (f2fs_inode_info *) foo;
// memset(fi, 0, sizeof(*fi));
// inode_init_once(&fi->vfs_inode);
// }
// VnodeF2fs *F2fs::F2fsAllocInode() {
// f2fs_inode_info *fi;
// // fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_NOFS | __GFP_ZERO);
// // if (!fi)
// // return NULL;
// fi = new f2fs_inode_info();
// InitOnce((void *) fi);
// /* Initilize f2fs-specific inode info */
// AtomicSet(&fi->vfs_inode.i_version, 1);
// AtomicSet(&fi->dirty_dents, 0);
// fi->i_current_depth = 1;
// fi->i_advise = 0;
// RwlockInit(&fi->ext.ext_lock);
// SetInodeFlag(fi, FI_NEW_INODE);
// return &fi->vfs_inode;
// }
// void F2fs::F2fsICallback(rcu_head *head)
// {
// [[maybe_unused]] inode *inode = container_of(head, inode, i_rcu);
// // kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
// }
// void F2fs::F2fsDestroyInode(inode *inode)
// {
// call_rcu(&inode->i_rcu, F2fsICallback);
// }
#endif
// Unmount-time teardown. Writes a final checkpoint, removes the node/meta
// vnodes from the vnode table, destroys the node and segment managers, and
// releases all superblock state. After this returns, sbi_ and raw_sb_ have
// been reset and must not be used.
void F2fs::PutSuper() {
  f2fs_destroy_stats(sbi_.get());
#if 0 // porting needed
  // stop_gc_thread(sbi_.get());
#endif
  // Flush all dirty metadata with a final checkpoint (second argument
  // presumably marks this as an unmount checkpoint — TODO confirm against
  // WriteCheckpoint's signature).
  WriteCheckpoint(false, true);
#if 0 // porting needed
  // Iput(sbi_->node_inode);
  // Iput(sbi_->meta_inode);
#endif
  // NOTE(review): vnode_table_lock_ stays held for the remainder of this
  // function, including the manager teardown below — confirm that is intended.
  fbl::AutoLock lock(&vnode_table_lock_);
  vnode_table_.erase(F2FS_NODE_INO(sbi_));
  vnode_table_.erase(F2FS_META_INO(sbi_));
  /* destroy f2fs internal modules */
  node_mgr_->DestroyNodeManager();
  seg_mgr_->DestroySegmentManager();
  // ckpt is presumably the buffer obtained during mount (GetValidCheckpoint)
  // — freed here before the owning sbi_ is released.
  delete sbi_->ckpt;
#if 0 // porting needed
  // brelse(sbi_->raw_super_buf);
#endif
  node_mgr_.reset();
  seg_mgr_.reset();
  raw_sb_.reset();
  sbi_.reset();
}
// Flushes filesystem state to disk. When |sync| is non-zero a (non-unmount)
// checkpoint is written; otherwise this is a no-op. Always returns ZX_OK.
zx_status_t F2fs::SyncFs(int sync) {
#ifdef F2FS_BU_DEBUG
  std::cout << "F2fs::SyncFs, sbi_->s_dirty=" << sbi_->s_dirty << std::endl;
#endif
#if 0 // porting needed
  //if (!sbi_->s_dirty && !get_pages(sbi_.get(), F2FS_DIRTY_NODES))
  // return 0;
#endif
  if (sync) {
    WriteCheckpoint(false, false);
  }
  return ZX_OK;
}
#if 0 // porting needed
// int F2fs::F2fsStatfs(dentry *dentry /*, kstatfs *buf*/) {
// super_block *sb = dentry->d_sb;
// f2fs_sb_info *sbi = F2FS_SB(sb);
// u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
// block_t total_count, user_block_count, start_count, ovp_count;
// total_count = LeToCpu(sbi->raw_super->block_count);
// user_block_count = sbi->user_block_count;
// start_count = LeToCpu(sbi->raw_super->segment0_blkaddr);
// ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
// buf->f_type = kF2fsSuperMagic;
// buf->f_bsize = sbi->blocksize;
// buf->f_blocks = total_count - start_count;
// buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
// buf->f_bavail = user_block_count - valid_user_blocks(sbi);
// buf->f_files = valid_inode_count(sbi);
// buf->f_ffree = sbi->total_node_count - valid_node_count(sbi);
// buf->f_namelen = F2FS_MAX_NAME_LEN;
// buf->f_fsid.val[0] = (u32)id;
// buf->f_fsid.val[1] = (u32)(id >> 32);
// return 0;
// }
// int F2fs::F2fsShowOptions(/*seq_file *seq*/) {
// if (test_opt(sbi, BG_GC))
// seq_puts(seq, ",background_gc_on");
// else
// seq_puts(seq, ",background_gc_off");
// if (test_opt(sbi, DISABLE_ROLL_FORWARD))
// seq_puts(seq, ",disable_roll_forward");
// if (test_opt(sbi, DISCARD))
// seq_puts(seq, ",discard");
// if (test_opt(sbi, NOHEAP))
// seq_puts(seq, ",no_heap_alloc");
// if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
// seq_puts(seq, ",disable_ext_indentify");
// seq_printf(seq, ",active_logs=%u", sbi->active_logs);
// return 0;
// }
// VnodeF2fs *F2fs::F2fsNfsGetInode(uint64_t ino, uint32_t generation) {
// fbl::RefPtr<VnodeF2fs> vnode_refptr;
// VnodeF2fs *vnode = nullptr;
// int err;
// if (ino < F2FS_ROOT_INO(sbi_.get()))
// return (VnodeF2fs *)ErrPtr(-ESTALE);
// /*
// * f2fs_iget isn't quite right if the inode is currently unallocated!
// * However f2fs_iget currently does appropriate checks to handle stale
// * inodes so everything is OK.
// */
// err = VnodeF2fs::Vget(this, ino, &vnode_refptr);
// if (err)
// return (VnodeF2fs *)ErrPtr(err);
// vnode = vnode_refptr.get();
// if (generation && vnode->i_generation != generation) {
// /* we didn't find the right inode.. */
// Iput(vnode);
// return (VnodeF2fs *)ErrPtr(-ESTALE);
// }
// return vnode;
// }
// struct fid {};
// dentry *F2fs::F2fsFhToDentry(fid *fid, int fh_len, int fh_type) {
// return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
// f2fs_nfs_get_inode);
// }
// dentry *F2fs::F2fsFhToParent(fid *fid, int fh_len, int fh_type) {
// return generic_fh_to_parent(sb, fid, fh_len, fh_type,
// f2fs_nfs_get_inode);
// }
// int F2fs::ParseOptions(char *options) {
// substring_t args[MAX_OPT_ARGS];
// char *p;
// int arg = 0;
// if (!options)
// return 0;
// while ((p = strsep(&options, ",")) != NULL) {
// int token;
// if (!*p)
// continue;
// /*
// * Initialize args struct so we know whether arg was
// * found; some options take optional arguments.
// */
// args[0].to = args[0].from = NULL;
// token = match_token(p, f2fs_tokens, args);
// switch (token) {
// case Opt_gc_background_off:
// clear_opt(sbi_.get(), BG_GC);
// break;
// case Opt_disable_roll_forward:
// set_opt(sbi_.get(), DISABLE_ROLL_FORWARD);
// break;
// case Opt_discard:
// set_opt(sbi_.get(), DISCARD);
// break;
// case Opt_noheap:
// set_opt(sbi_.get(), NOHEAP);
// break;
// case Opt_active_logs:
// if (args->from && match_int(args, &arg))
// return -EINVAL;
// if (arg != 2 && arg != 4 && arg != 6)
// return -EINVAL;
// sbi_.get()->active_logs = arg;
// break;
// case Opt_disable_ext_identify:
// set_opt(sbi_.get(), DISABLE_EXT_IDENTIFY);
// break;
// default:
// return -EINVAL;
// }
// }
// return 0;
// }
// loff_t F2fs::MaxFileSize(unsigned bits) {
// loff_t result = ADDRS_PER_INODE;
// loff_t leaf_count = ADDRS_PER_BLOCK;
// /* two direct node blocks */
// result += (leaf_count * 2);
// /* two indirect node blocks */
// leaf_count *= NIDS_PER_BLOCK;
// result += (leaf_count * 2);
// /* one double indirect node block */
// leaf_count *= NIDS_PER_BLOCK;
// result += leaf_count;
// result <<= bits;
// return result;
// }
#endif
// Validates the on-disk superblock fields this port depends on.
// Returns 0 when the superblock is acceptable, 1 on any mismatch.
int F2fs::SanityCheckRawSuper() {
  // The magic number must identify an f2fs volume.
  if (LeToCpu(raw_sb_->magic) != kF2fsSuperMagic) {
    return 1;
  }
  /* Currently, support only 4KB block size */
  const unsigned int block_size = 1 << LeToCpu(raw_sb_->log_blocksize);
  if (block_size != kPageCacheSize) {
    return 1;
  }
  // The sector geometry must match the compiled-in constants.
  if (LeToCpu(raw_sb_->log_sectorsize) != F2FS_LOG_SECTOR_SIZE ||
      LeToCpu(raw_sb_->log_sectors_per_block) != F2FS_LOG_SECTORS_PER_BLOCK) {
    return 1;
  }
  return 0;
}
// Cross-checks the checkpoint against the superblock: the segments consumed
// by metadata (checkpoint, SIT, NAT, reserved, SSA) must total fewer than the
// volume's segment count. Returns 0 on success, 1 on inconsistency.
int F2fs::SanityCheckCkpt() {
  const unsigned int total_segments = LeToCpu(raw_sb_->segment_count);
  unsigned int meta_segments = LeToCpu(raw_sb_->segment_count_ckpt);
  meta_segments += LeToCpu(raw_sb_->segment_count_sit);
  meta_segments += LeToCpu(raw_sb_->segment_count_nat);
  meta_segments += LeToCpu(sbi_->ckpt->rsvd_segment_count);
  meta_segments += LeToCpu(raw_sb_->segment_count_ssa);
  return (meta_segments >= total_segments) ? 1 : 0;
}
// Caches frequently used geometry values from the raw superblock into sbi_
// (converting from little-endian) and zeroes the per-type page counters.
void F2fs::InitSbInfo() {
  const auto &raw = RawSb();
  sbi_->log_sectors_per_block = LeToCpu(raw.log_sectors_per_block);
  sbi_->log_blocksize = LeToCpu(raw.log_blocksize);
  sbi_->blocksize = 1 << sbi_->log_blocksize;
  sbi_->log_blocks_per_seg = LeToCpu(raw.log_blocks_per_seg);
  sbi_->blocks_per_seg = 1 << sbi_->log_blocks_per_seg;
  sbi_->segs_per_sec = LeToCpu(raw.segs_per_sec);
  sbi_->secs_per_zone = LeToCpu(raw.secs_per_zone);
  sbi_->total_sections = LeToCpu(raw.section_count);
  // Half of the NAT segments are live at a time; each contributes
  // blocks_per_seg blocks of NAT entries.
  sbi_->total_node_count =
      (LeToCpu(raw.segment_count_nat) / 2) * sbi_->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
  sbi_->root_ino_num = LeToCpu(raw.root_ino);
  sbi_->node_ino_num = LeToCpu(raw.node_ino);
  sbi_->meta_ino_num = LeToCpu(raw.meta_ino);
  // Reset all per-type page-count accounting.
  for (int type = 0; type < NR_COUNT_TYPE; ++type) {
    AtomicSet(&sbi_->nr_pages[type], 0);
  }
}
// Mount-time initialization. Allocates and zeroes the in-memory superblock
// info (sbi_), applies the fixed mount options, validates the on-disk
// superblock and checkpoint, builds the segment and node managers, loads the
// root inode, and replays fsync'd data when the volume was not cleanly
// unmounted. On failure, the goto chain at the bottom unwinds exactly the
// state constructed so far, in reverse order of construction.
// Returns ZX_OK on success or a zx_status_t error code.
zx_status_t F2fs::FillSuper() {
#if 0 // porting needed
  // f2fs_super_block *raw_super;
  // buffer_head *raw_super_buf = NULL;
#endif
  VnodeF2fs *root;
  zx_status_t err = ZX_ERR_INVALID_ARGS;
  /* allocate memory for f2fs-specific super block info */
  sbi_ = std::make_unique<f2fs_sb_info>();
  if (!sbi_)
    return ZX_ERR_NO_MEMORY;
  memset(sbi_.get(), 0, sizeof(f2fs_sb_info));
#if 0 // porting needed
  // super_block *sb = sbi_->sb;
  /* set a temporary block size */
  // if (!SbSetBlocksize(sb, F2FS_BLKSIZE))
  // goto free_sbi;
#endif
  /* mount options are fixed as below */
  clear_opt(sbi_, BG_GC);
  clear_opt(sbi_, DISCARD);
  set_opt(sbi_, NOHEAP);
  clear_opt(sbi_, XATTR_USER);
  clear_opt(sbi_, POSIX_ACL);
  sbi_->active_logs = NR_CURSEG_TYPE;
  set_opt(sbi_, DISABLE_EXT_IDENTIFY);
#if 0 // porting needed
  /* parse mount options */
  // if (ParseOptions((char *)data))
  // goto free_sb_buf;
#endif
  /* sanity checking of raw super */
  if (SanityCheckRawSuper())
    goto free_sb_buf;
#if 0 // porting needed
  // sb->s_maxbytes = MaxFileSize(RawSb().log_blocksize);
  // sb->s_max_links = F2FS_LINK_MAX;
  // For NFS support
  // get_random_bytes(&sbi->s_next_generation, sizeof(uint32_t));
  // sb->s_op = &f2fs_sops;
  // sb->s_xattr = f2fs_xattr_handlers;
  // sb->s_export_op = &f2fs_export_ops;
  // sb->s_magic = kF2fsSuperMagic;
  // sb->s_fs_info = sbi_.get();
  // sb->s_time_gran = 1;
  // sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
  // (test_opt(sbi_, POSIX_ACL) ? MS_POSIXACL : 0);
  // memcpy(&sb->s_uuid, RawSb().uuid, sizeof(RawSb().uuid));
  /* init f2fs-specific super block info */
  // sbi->sb = sb;
#endif
  // sbi_ borrows the raw superblock owned by raw_sb_.
  sbi_->raw_super = raw_sb_.get();
#if 0 // porting needed
  // sbi_->raw_super_buf = raw_super_buf;
#endif
  sbi_->por_doing = 0;
  SpinLockInit(&sbi_->stat_lock);
#if 0 // porting needed
  // init_rwsem(&sbi->bio_sem);
#endif
  InitSbInfo();
  /* get an inode for meta space */
#if 0 // porting needed
  // err = VnodeF2fs::Vget(this, F2FS_META_INO(sbi_), &sbi_->meta_vnode);
  // if (err) {
  // goto free_sb_buf;
  // }
#endif
  // Loads the newest valid checkpoint; presumably allocates sbi_->ckpt,
  // which is freed at free_cp below — TODO confirm ownership.
  err = GetValidCheckpoint();
  if (err)
    goto free_meta_inode;
  /* sanity checking of checkpoint */
  err = ZX_ERR_INVALID_ARGS;
  if (SanityCheckCkpt())
    goto free_cp;
  // Seed the in-memory usage counters from the checkpoint.
  sbi_->total_valid_node_count = LeToCpu(sbi_->ckpt->valid_node_count);
  sbi_->total_valid_inode_count = LeToCpu(sbi_->ckpt->valid_inode_count);
  sbi_->user_block_count = LeToCpu(sbi_->ckpt->user_block_count);
  sbi_->total_valid_block_count = LeToCpu(sbi_->ckpt->valid_block_count);
  sbi_->last_valid_block_count = sbi_->total_valid_block_count;
  sbi_->alloc_valid_block_count = 0;
  list_initialize(&sbi_->dir_inode_list);
  SpinLockInit(&sbi_->dir_inode_lock);
  /* init super block */
#if 0 // porting needed
  // if (!SbSetBlocksize(sb, sbi_->blocksize))
  // goto free_cp;
#endif
  InitOrphanInfo();
  /* setup f2fs internal modules */
  seg_mgr_ = std::make_unique<SegMgr>(this);
  err = seg_mgr_->BuildSegmentManager();
  if (err)
    goto free_sm;
  node_mgr_ = std::make_unique<NodeMgr>(this);
  err = node_mgr_->BuildNodeManager();
  if (err)
    goto free_nm;
#if 0 // porting needed
  // build_gc_manager(sbi);
  /* get an inode for node space */
  // err = VnodeF2fs::Vget(this, F2FS_NODE_INO(sbi_), &sbi_->node_vnode);
  // if (err) {
  // goto free_nm;
  // }
#endif
  /* if there are any orphan nodes, free them */
  // CP_UMOUNT_FLAG clear means the volume was not cleanly unmounted.
  err = ZX_ERR_INVALID_ARGS;
  if (!(sbi_->ckpt->ckpt_flags & CP_UMOUNT_FLAG) && RecoverOrphanInodes())
    goto free_node_inode;
  /* read root inode and dentry */
  err = VnodeF2fs::Vget(this, F2FS_ROOT_INO(sbi_), &root_vnode_);
  if (err) {
    goto free_node_inode;
  }
  root = root_vnode_.get();
  // Reject a corrupted root: it must be a directory with blocks and a size.
  if (!S_ISDIR(root->i_mode_) || !root->i_blocks_ || !root->i_size_) {
    // Fix: err still holds ZX_OK from the successful Vget() above; without
    // resetting it, this failure path would report success to the caller.
    err = ZX_ERR_INVALID_ARGS;
    goto free_root_inode;
  }
#if 0 // porting needed
  // sb->s_root = DMakeRoot(root); /* allocate root dentry */
  // if (!sb->s_root) {
  // err = ZX_ERR_NO_MEMORY;
  // goto free_root_inode;
  // }
#endif
  /* recover fsynced data */
  if (!(sbi_->ckpt->ckpt_flags & CP_UMOUNT_FLAG) &&
      !test_opt(sbi_.get(), DISABLE_ROLL_FORWARD)) {
    RecoverFsyncData();
  }
#if 0 // porting needed
  /* After POR, we can run background GC thread */
  // err = start_gc_thread(sbi);
  // if (err)
  // goto fail;
#endif
  err = f2fs_build_stats(sbi_.get());
  if (err)
    goto fail;
  return ZX_OK;

  // Error unwinding: each label releases what was built before its goto site,
  // then falls through to the earlier stages' cleanup.
fail:
#if 0 // porting needed
  // stop_gc_thread(sbi);
#endif
free_root_inode:
  // NOTE(review): root_vnode_ is not released here — confirm whether it
  // should be reset on this failure path.
#if 0 // porting needed
  // dput(sb->s_root);
  // sb->s_root = NULL;
#endif
free_node_inode:
#if 0 // porting needed
  // Iput(sbi_->node_inode);
#endif
free_nm:
  node_mgr_->DestroyNodeManager();
  node_mgr_.reset();
free_sm:
  seg_mgr_->DestroySegmentManager();
  seg_mgr_.reset();
free_cp:
  delete sbi_->ckpt;
free_meta_inode:
#if 0 // porting needed
  // MakeBadInode(sbi_->meta_inode);
  // Iput(sbi_->meta_inode);
#endif
free_sb_buf:
#if 0 // porting needed
  // brelse(raw_super_buf);
  // free_sbi:
#endif
  sbi_.reset();
  return err;
}
#if 0 // porting needed
// dentry *F2fs::F2fsMount(file_system_type *fs_type, int flags,
// const char *dev_name, void *data)
// {
// // return mount_bdev(fs_type, flags, dev_name, data, F2fs::FillSuper);
// return mount_bdev(fs_type, flags, dev_name, data, NULL);
// }
// int F2fs::InitInodecache(void) {
// f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
// sizeof(f2fs_inode_info), NULL);
// if (f2fs_inode_cachep == NULL)
// return -ENOMEM;
// }
// void F2fs::DestroyInodecache(void) {
// /*
// * Make sure all delayed rcu free inodes are flushed before we
// * destroy cache.
// */
// rcu_barrier();
// kmem_cache_destroy(f2fs_inode_cachep);
// }
// int /*__init*/ F2fs::initF2fsFs(void)
// {
// int err;
// err = InitInodecache();
// if (err)
// goto fail;
// // TODO(unknown): should decide how to use slab cache before it
// //err = CreateNodeManagerCaches();
// if (err)
// goto fail;
// return register_filesystem(&f2fs_fs_type);
// fail:
// return err;
// }
// void /*__exit*/ F2fs::exitF2fsFs(void)
// {
// unregister_filesystem(&f2fs_fs_type);
// // TODO(unknown): should decide how to use slab cache before it
// //DestroyNodeManagerCaches();
// DestroyInodecache();
// }
#endif
} // namespace f2fs