blob: 2dddbafbe90f8eceeced35c7fc21e356e3e2dc95 [file] [log] [blame]
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef F2FS_INTERNAL_H_
#define F2FS_INTERNAL_H_
#include <lib/zircon-internal/thread_annotations.h>
namespace f2fs {
class VnodeF2fs;
/*
* For mount options
*/
/* Mount option bit flags stored in f2fs_mount_info::opt. */
#define F2FS_MOUNT_BG_GC 0x00000001
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002
#define F2FS_MOUNT_DISCARD 0x00000004
#define F2FS_MOUNT_NOHEAP 0x00000008
#define F2FS_MOUNT_XATTR_USER 0x00000010
#define F2FS_MOUNT_POSIX_ACL 0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040
/* Clear / set / test one F2FS_MOUNT_* option on a superblock info struct. */
#define clear_opt(sbi, option) ((sbi)->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option) ((sbi)->mount_opt.opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option) ((sbi)->mount_opt.opt & F2FS_MOUNT_##option)
/* Parsed mount options; |opt| holds F2FS_MOUNT_* bit flags. */
struct f2fs_mount_info {
  unsigned int opt; /* bitwise OR of F2FS_MOUNT_* flags */
};
/*
* For checkpoint manager
*/
enum { NAT_BITMAP, SIT_BITMAP };
/* for the list of orphan inodes */
/* One entry in f2fs_sb_info::orphan_inode_list, recording an orphan inode
 * that must be reclaimed at the next checkpoint. */
struct orphan_inode_entry {
  list_node_t list; /* list head */
  nid_t ino;        /* inode number */
};
/* for the list of directory inodes */
/* One entry in f2fs_sb_info::dir_inode_list, tracking a dirty directory. */
struct dir_inode_entry {
  list_node_t list;  /* list head */
  VnodeF2fs *vnode;  /* vfs inode pointer */
};
/* for the list of fsync inodes, used only during recovery */
/* For the list of fsync'ed inodes; used only during roll-forward recovery. */
struct fsync_inode_entry {
  list_node_t list; /* list head */
  VnodeF2fs *vnode; /* vfs inode pointer */
  block_t blkaddr;  /* block address locating the last inode (node) block */
};
/* Accessors for the NAT/SIT journal area of a summary block: entry counts
 * and individual journal entries. Arguments are parenthesized so that the
 * macros stay correct when passed compound expressions. */
#define nats_in_cursum(sum) (LeToCpu((sum)->n_nats))
#define sits_in_cursum(sum) (LeToCpu((sum)->n_sits))
#define nat_in_journal(sum, i) ((sum)->nat_j.entries[(i)].ne)
#define nid_in_journal(sum, i) ((sum)->nat_j.entries[(i)].nid)
#define sit_in_journal(sum, i) ((sum)->sit_j.entries[(i)].se)
#define segno_in_journal(sum, i) ((sum)->sit_j.entries[(i)].segno)
/*
 * Adds |i| to the count of NAT journal entries in summary block |rs| and
 * returns the previous count.
 * NOTE(review): the sum is cast to uint32_t before CpuToLe; if n_nats is a
 * 16-bit on-disk field this narrows on assignment — confirm against the
 * on-disk layout header.
 */
static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i) {
  int before = nats_in_cursum(rs);
  rs->n_nats = CpuToLe(static_cast<uint32_t>(before + i));
  return before;
}
/*
 * Adds |i| to the count of SIT journal entries in summary block |rs| and
 * returns the previous count.
 * NOTE(review): same uint32_t-vs-on-disk-field-width concern as
 * update_nats_in_cursum() above — confirm the type of n_sits.
 */
static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i) {
  int before = sits_in_cursum(rs);
  rs->n_sits = CpuToLe(static_cast<uint32_t>(before + i));
  return before;
}
/*
* For INODE and NODE manager
*/
#define XATTR_NODE_OFFSET \
(-1) /* \
* store xattrs to one node block per \
* file keeping -1 as its node offset to \
* distinguish from index node blocks. \
*/
#define RDONLY_NODE \
1 /* \
* specify a read-only mode when getting \
* a node block. 0 is read-write mode. \
* used by get_dnode_of_data(). \
*/
#define F2FS_LINK_MAX 32000 /* maximum link count per file */
/* for in-memory extent cache entry */
/* In-memory cache of one contiguous on-disk extent of a file. */
struct extent_info {
  rwlock_t ext_lock; /* rwlock protecting the fields below */
  unsigned int fofs; /* start offset in a file */
  uint32_t blk_addr; /* start block address of the extent */
  unsigned int len;  /* length of the extent */
};
/*
* i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
*/
#define FADVISE_COLD_BIT 0x01
/* Per-inode in-memory state kept alongside the VFS vnode. */
struct f2fs_inode_info {
  // struct inode vfs_inode; /* serve a vfs inode */
  unsigned long i_flags;        /* keep an inode flags for ioctl */
  unsigned char i_advise;       /* use to give file attribute hints */
  unsigned int i_current_depth; /* use only in directory structure */
  umode_t i_acl_mode;           /* keep file acl mode temporarily */
  /* Used below internally in f2fs */
  unsigned long flags;             /* per-file FI_* flags (see enum near EOF) */
  unsigned long long data_version; /* latest version of data for fsync */
  atomic_t dirty_dents;            /* # of dirty dentry pages */
  f2fs_hash_t chash;               /* hash value of given file name */
  unsigned int clevel;             /* maximum level of given file name */
  nid_t i_xattr_nid;               /* node id that contains xattrs */
  struct extent_info ext;          /* in-memory extent cache entry */
};
/*
 * Loads the on-disk extent |i_ext| into the in-memory extent cache |ext|,
 * converting each field from little-endian. Takes the write lock because it
 * mutates the cached extent.
 */
static inline void GetExtentInfo(struct extent_info *ext, struct f2fs_extent i_ext) {
  WriteLock(&ext->ext_lock);
  ext->fofs = LeToCpu(i_ext.fofs);
  ext->blk_addr = LeToCpu(i_ext.blk_addr);
  ext->len = LeToCpu(i_ext.len);
  WriteUnlock(&ext->ext_lock);
}
/*
 * Serializes the in-memory extent cache |ext| into the on-disk form |i_ext|,
 * converting each field to little-endian. Only the read lock is needed since
 * the cached extent is not modified.
 */
static inline void set_raw_extent(struct extent_info *ext, struct f2fs_extent *i_ext) {
  ReadLock(&ext->ext_lock);
  i_ext->fofs = CpuToLe(ext->fofs);
  i_ext->blk_addr = CpuToLe(ext->blk_addr);
  i_ext->len = CpuToLe(ext->len);
  ReadUnlock(&ext->ext_lock);
}
/* Node manager state: NAT cache, free-nid pool, and checkpoint bitmaps. */
struct f2fs_nm_info {
  block_t nat_blkaddr;  /* base disk address of NAT */
  nid_t max_nid;        /* maximum possible node ids */
  nid_t init_scan_nid;  /* the first nid to be scanned */
  nid_t next_scan_nid;  /* the next nid to be scanned */
  /* NAT cache management */
  RadixTreeRoot nat_root;        /* root of the nat entry cache */
  rwlock_t nat_tree_lock;        /* protects the NAT cache tree and lists */
  unsigned int nat_cnt;          /* the # of cached nat entries */
  list_node_t nat_entries;       /* cached nat entry list (clean) */
  list_node_t dirty_nat_entries; /* cached nat entry list (dirty) */
  /* free node ids management */
  list_node_t free_nid_list;     /* a list for free nids */
  spinlock_t free_nid_list_lock; /* protect free nid list */
  unsigned int fcnt;             /* the number of free node id */
  mtx_t build_lock;              /* lock for build free nids */
  /* for checkpoint */
  char *nat_bitmap;      /* NAT bitmap pointer */
  char *nat_prev_bitmap; /* JY: NAT previous checkpoint bitmap pointer */
  int bitmap_size;       /* bitmap size */
};
/*
* this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
* by the data offset in a file.
*/
/* Result/parameter bundle for a data-offset -> direct-node-block lookup.
 * Note inode_page and node_page may alias when the data lives in the inode's
 * own node block (see F2fsPutDnode, which guards against a double free). */
struct dnode_of_data {
  // struct inode *inode; /* vfs inode pointer */
  VnodeF2fs *vnode;          /* owning vnode */
  struct Page *inode_page;   /* its inode page, NULL is possible */
  struct Page *node_page;    /* cached direct node page */
  nid_t nid;                 /* node id of the direct node block */
  unsigned int ofs_in_node;  /* data offset in the node page */
  bool inode_page_locked;    /* inode page is locked or not */
  block_t data_blkaddr;      /* block address of the node block */
};
/*
 * Initializes |dn| to describe a lookup for node |nid| of |vnode|.
 * |ipage| / |npage| may be null when the caller has not loaded them yet.
 * NOTE(review): ofs_in_node and data_blkaddr are not initialized here —
 * presumably callers fill them in after the node lookup; confirm.
 */
static inline void SetNewDnode(dnode_of_data *dn, VnodeF2fs *vnode, Page *ipage, Page *npage,
                               nid_t nid) {
  dn->vnode = vnode;
  dn->inode_page = ipage;
  dn->node_page = npage;
  dn->nid = nid;
  // inode_page_locked is a bool; use the bool literal rather than 0.
  dn->inode_page_locked = false;
}
/*
* For SIT manager
*
* By default, there are 6 active log areas across the whole main area.
* When considering hot and cold data separation to reduce cleaning overhead,
* we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
* respectively.
* In the current design, you should not change the numbers intentionally.
* Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
* logs individually according to the underlying devices. (default: 6)
* Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
* data and 8 for node logs.
*/
#define NR_CURSEG_DATA_TYPE (3)
#define NR_CURSEG_NODE_TYPE (3)
#define NR_CURSEG_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
/* Active log (current segment) types: 3 data logs + 3 node logs, split by
 * expected temperature to reduce cleaning overhead. */
enum {
  CURSEG_HOT_DATA = 0, /* directory entry blocks */
  CURSEG_WARM_DATA,    /* data blocks */
  CURSEG_COLD_DATA,    /* multimedia or GCed data blocks */
  CURSEG_HOT_NODE,     /* direct node blocks of directory files */
  CURSEG_WARM_NODE,    /* direct node blocks of normal files */
  CURSEG_COLD_NODE,    /* indirect node blocks */
  NO_CHECK_TYPE        /* sentinel: no specific log type requested */
};
/* Segment manager state: SIT/free/dirty segment info and layout geometry. */
struct f2fs_sm_info {
  struct sit_info *sit_info;             /* whole segment information */
  struct free_segmap_info *free_info;    /* free segment information */
  struct dirty_seglist_info *dirty_info; /* dirty segment information */
  struct curseg_info *curseg_array;      /* active segment information */
  list_node_t wblist_head; /* list of under-writeback pages */
  spinlock_t wblist_lock;  /* lock for checkpoint */
  block_t seg0_blkaddr; /* block address of 0'th segment */
  block_t main_blkaddr; /* start block address of main area */
  block_t ssa_blkaddr;  /* start block address of SSA area */
  unsigned int segment_count;     /* total # of segments */
  unsigned int main_segments;     /* # of segments in main area */
  unsigned int reserved_segments; /* # of reserved segments */
  unsigned int ovp_segments;      /* # of overprovision segments */
};
/*
* For directory operation
*/
#define NODE_DIR1_BLOCK (ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK (ADDRS_PER_INODE + 2)
#define NODE_IND1_BLOCK (ADDRS_PER_INODE + 3)
#define NODE_IND2_BLOCK (ADDRS_PER_INODE + 4)
#define NODE_DIND_BLOCK (ADDRS_PER_INODE + 5)
/*
* For superblock
*/
/*
* COUNT_TYPE for monitoring
*
* f2fs monitors the number of several block types such as on-writeback,
* dirty dentry blocks, dirty node blocks, and dirty meta blocks.
*/
enum count_type {
  F2FS_WRITEBACK,   /* pages under writeback */
  F2FS_DIRTY_DENTS, /* dirty dentry pages */
  F2FS_DIRTY_NODES, /* dirty node pages */
  F2FS_DIRTY_META,  /* dirty meta (SIT/NAT/CP) pages */
  NR_COUNT_TYPE,    /* number of counters; sizes f2fs_sb_info::nr_pages */
};
/*
* FS_LOCK nesting subclasses for the lock validator:
*
* The locking order between these classes is
* RENAME -> DENTRY_OPS -> DATA_WRITE -> DATA_NEW
* -> DATA_TRUNC -> NODE_WRITE -> NODE_NEW -> NODE_TRUNC
*/
/* Indices into f2fs_sb_info::fs_lock; see the locking-order comment above. */
enum lock_type {
  RENAME,       /* for renaming operations */
  DENTRY_OPS,   /* for directory operations */
  DATA_WRITE,   /* for data write */
  DATA_NEW,     /* for data allocation */
  DATA_TRUNC,   /* for data truncate */
  NODE_NEW,     /* for node allocation */
  NODE_TRUNC,   /* for node truncate */
  NODE_WRITE,   /* for node write */
  NR_LOCK_TYPE, /* number of lock classes; sizes fs_lock[] */
};
/*
 * The below are the page types of bios used in submit_bio().
* The available types are:
* DATA User data pages. It operates as async mode.
* NODE Node pages. It operates as async mode.
* META FS metadata pages such as SIT, NAT, CP.
* NR_PAGE_TYPE The number of page types.
 * META_FLUSH Make sure the previous pages are written,
 * waiting for the bios' completion.
 * It can only be used with META type pages.
*/
/* Bio page types; see the comment above. META_FLUSH deliberately comes after
 * NR_PAGE_TYPE so it does not get its own bio slot (bio[NR_PAGE_TYPE]). */
enum page_type {
  DATA,         /* user data pages (async) */
  NODE,         /* node pages (async) */
  META,         /* FS metadata pages: SIT, NAT, CP */
  NR_PAGE_TYPE, /* number of bio slots */
  META_FLUSH,   /* META write that waits for bio completion */
};
/* In-core superblock state for one mounted f2fs instance: raw super /
 * checkpoint pointers, node & segment managers, orphan/dirty-dir lists,
 * geometry, block/node/inode accounting, and GC/stat state. */
struct f2fs_sb_info {
  // struct super_block *sb; /* pointer to VFS super block */
  // struct buffer_head *raw_super_buf; /* buffer head of raw sb */
  const f2fs_super_block *raw_super; /* raw super block pointer */
  int s_dirty;                       /* dirty flag for checkpoint */
  /* for node-related operations */
  struct f2fs_nm_info *nm_info; /* node manager */
  // struct inode *node_inode; /* cache node blocks */
  fbl::RefPtr<VnodeF2fs> node_vnode; /* vnode caching node blocks */
  /* for segment-related operations */
  struct f2fs_sm_info *sm_info;             /* segment manager */
  struct bio *bio[NR_PAGE_TYPE];            /* bios to merge */
  sector_t last_block_in_bio[NR_PAGE_TYPE]; /* last block number */
  // struct rw_semaphore bio_sem; /* IO semaphore */
  /* for checkpoint */
  struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
  // struct inode *meta_inode; /* cache meta blocks */
  fbl::RefPtr<VnodeF2fs> meta_vnode; /* vnode caching meta blocks */
  mtx_t cp_mutex;                    /* for checkpoint procedure */
  mtx_t fs_lock[NR_LOCK_TYPE];       /* for blocking FS operations */
  mtx_t write_inode;                 /* mutex for write inode */
  mtx_t writepages;                  /* mutex for writepages() */
  int por_doing;                     /* recovery is doing or not */
  /* for orphan inode management */
  list_node_t orphan_inode_list; /* orphan inode list */
  mtx_t orphan_inode_mutex;      /* for orphan inode list */
  unsigned int n_orphans;        /* # of orphan inodes */
  /* for directory inode management */
  list_node_t dir_inode_list; /* dir inode list */
  spinlock_t dir_inode_lock;  /* for dir inode list lock */
  unsigned int n_dirty_dirs;  /* # of dir inodes */
  /* basic file system units */
  unsigned int log_sectors_per_block; /* log2 sectors per block */
  unsigned int log_blocksize;         /* log2 block size */
  unsigned int blocksize;             /* block size */
  unsigned int root_ino_num;          /* root inode number */
  unsigned int node_ino_num;          /* node inode number */
  unsigned int meta_ino_num;          /* meta inode number */
  unsigned int log_blocks_per_seg;    /* log2 blocks per segment */
  unsigned int blocks_per_seg;        /* blocks per segment */
  unsigned int segs_per_sec;          /* segments per section */
  unsigned int secs_per_zone;         /* sections per zone */
  unsigned int total_sections;        /* total section count */
  unsigned int total_node_count;      /* total node block count */
  unsigned int total_valid_node_count;  /* valid node block count */
  unsigned int total_valid_inode_count; /* valid inode count */
  int active_logs;                      /* # of active logs */
  block_t user_block_count;        /* # of user blocks */
  block_t total_valid_block_count; /* # of valid blocks */
  block_t alloc_valid_block_count; /* # of allocated blocks */
  block_t last_valid_block_count;  /* for recovery */
  uint32_t s_next_generation;      /* for NFS support */
  atomic_t nr_pages[NR_COUNT_TYPE];  /* # of pages, see count_type */
  struct f2fs_mount_info mount_opt;  /* mount options */
  /* for cleaning operations */
  mtx_t gc_mutex;                    /* mutex for GC */
  struct f2fs_gc_kthread *gc_thread; /* GC thread */
  /*
   * for stat information.
   * one is for the LFS mode, and the other is for the SSR mode.
   */
  struct f2fs_stat_info *stat_info; /* FS status information */
  unsigned int segment_count[2];    /* # of allocated segments */
  unsigned int block_count[2];      /* # of allocated blocks */
  unsigned int last_victim[2];      /* last victim segment # */
  int total_hit_ext, read_hit_ext;  /* extent cache hit ratio */
  int bg_gc;                        /* background gc calls */
  spinlock_t stat_lock;             /* lock for stat operations */
};
/*
* Inline functions
*/
// static inline struct f2fs_inode_info *F2FS_I(VnodeF2fs *vnode)
// {
// // TODO: IMPL
// //return container_of(inode, f2fs_inode_info, vfs_inode);
// return &vnode->fi;
// }
// static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
//{
// return (struct f2fs_sb_info *)sb->s_fs_info;
//}
/*
 * Returns the raw on-disk superblock of |sbi|.
 * NOTE(review): raw_super is const-qualified and the C-style cast strips
 * const — writing through the result would be UB. A const return type
 * would be safer if callers allow it.
 */
static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) {
  return (struct f2fs_super_block *)(sbi->raw_super);
}
/* Returns the raw checkpoint of |sbi|. */
static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) {
  return (struct f2fs_checkpoint *)(sbi->ckpt);
}
/* Returns the node manager of |sbi|. */
static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) {
  return (struct f2fs_nm_info *)(sbi->nm_info);
}
/* Returns the segment manager of |sbi|. */
static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi) {
  return (struct f2fs_sm_info *)(sbi->sm_info);
}
/* Returns the SIT (segment information table) state of |sbi|. */
static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi) {
  return (struct sit_info *)(SM_I(sbi)->sit_info);
}
/* Returns the free segment map state of |sbi|. */
static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi) {
  return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}
/* Returns the dirty segment list state of |sbi|. */
static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi) {
  return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}
static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi) { sbi->s_dirty = 1; }
static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi) { sbi->s_dirty = 0; }
/*
 * Acquires the per-operation FS lock of class |t| (see enum lock_type).
 * TA_ACQ lets Clang's thread-safety analysis track the acquisition.
 * The Linux original used a lockdep nesting subclass (see TODO below);
 * plain mtx_lock loses that annotation.
 */
static inline void mutex_lock_op(struct f2fs_sb_info *sbi, enum lock_type t)
    TA_ACQ(&sbi->fs_lock[t]) {
  // TODO: IMPL
  // mutex_lock_nested(&sbi->fs_lock[t], t);
  mtx_lock(&sbi->fs_lock[t]);
}
/* Releases the per-operation FS lock of class |t| taken by mutex_lock_op(). */
static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, enum lock_type t)
    TA_REL(&sbi->fs_lock[t]) {
  mtx_unlock(&sbi->fs_lock[t]);
}
/*
* Check whether the given nid is within node id range.
*/
/*
 * Intended to validate that |nid| is below NM_I(sbi)->max_nid.
 * Currently a no-op stub: the check is still commented out below.
 */
static inline void CheckNidRange(struct f2fs_sb_info *sbi, nid_t nid) {
  // TODO: IMPL
  // BUG_ON((nid >= NM_I(sbi)->max_nid));
}
#define F2FS_DEFAULT_ALLOCATED_BLOCKS 1
/*
* Check whether the inode has blocks or not
*/
// [[maybe_unused]]
// static inline int F2FS_HAS_BLOCKS(struct inode *inode)
// {
// if (F2FS_I(inode)->i_xattr_nid)
// return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1);
// else
// return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS);
// }
static inline int F2FS_HAS_BLOCKS(void *vnode) { return 1; }
// [[maybe_unused]]
// static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
// struct inode *inode, blkcnt_t count)
// {
// block_t valid_block_count;
// //TODO: IMPL
// //SpinLock(&sbi->stat_lock);
// valid_block_count =
// sbi->total_valid_block_count + (block_t)count;
// if (valid_block_count > sbi->user_block_count) {
// //TODO: IMPL
// //SpinUnlock(&sbi->stat_lock);
// return false;
// }
// inode->i_blocks += count;
// sbi->total_valid_block_count = valid_block_count;
// sbi->alloc_valid_block_count += (block_t)count;
// //TODO: IMPL
// //SpinUnlock(&sbi->stat_lock);
// return true;
// }
/*
 * Subtracts |count| blocks from the filesystem-wide valid-block total under
 * stat_lock. Always returns 0.
 * NOTE(review): the underflow checks and per-inode block accounting remain
 * commented out below, so |vnode| is currently unused and underflow is not
 * detected.
 */
static inline int dec_valid_block_count(struct f2fs_sb_info *sbi, void *vnode, blkcnt_t count) {
  SpinLock(&sbi->stat_lock);
  // BUG_ON(sbi->total_valid_block_count < (block_t) count);
  // BUG_ON(inode->i_blocks < count);
  // inode->i_blocks -= count;
  sbi->total_valid_block_count -= (block_t)count;
  SpinUnlock(&sbi->stat_lock);
  return 0;
}
/*
 * Intended to bump the page counter for |count_type| (see enum count_type);
 * the atomic increment is still a stub. It does mark the superblock dirty so
 * a checkpoint will be triggered.
 */
[[maybe_unused]] static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) {
  // TODO: IMPL
  // AtomicInc(&sbi->nr_pages[count_type]);
  F2FS_SET_SB_DIRT(sbi);
}
/* Stub: intended to increment the per-inode dirty dentry page count. */
static inline void InodeIncDirtyDents(VnodeF2fs *vnode) {
  // //TODO: IMPL
  // //AtomicInc(&F2FS_I(inode)->dirty_dents);
}
/* Stub: intended to decrement the page counter for |count_type|. */
static inline void DecPageCount(struct f2fs_sb_info *sbi, int count_type) {
  // TODO: IMPL
  // AtomicDec(&sbi->nr_pages[count_type]);
}
/* Stub: intended to decrement the per-inode dirty dentry page count. */
static inline void InodeDecDirtyDents(void *vnode) {
  // //TODO: IMPL
  // //AtomicDec(&F2FS_I(inode)->dirty_dents);
}
/* Stub: intended to read the page counter for |count_type|; returns 0 for now. */
static inline int get_pages(struct f2fs_sb_info *sbi, int count_type) {
  // TODO: IMPL
  // return AtomicRead(&sbi->nr_pages[count_type]);
  return 0;
}
/* Returns the filesystem-wide valid block count, read under stat_lock. */
static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) {
  block_t ret;
  SpinLock(&sbi->stat_lock);
  ret = sbi->total_valid_block_count;
  SpinUnlock(&sbi->stat_lock);
  return ret;
}
/*
 * Returns the byte size of the NAT or SIT version bitmap as recorded in the
 * current checkpoint, or 0 for an unknown |flag|.
 */
static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) {
  struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
  /* return NAT or SIT bitmap size */
  if (flag == NAT_BITMAP)
    return LeToCpu(ckpt->nat_ver_bitmap_bytesize);
  else if (flag == SIT_BITMAP)
    return LeToCpu(ckpt->sit_ver_bitmap_bytesize);
  return 0;
}
/*
 * Returns a pointer to the NAT or SIT version bitmap inside the checkpoint.
 * Both bitmaps share one area; the offset for NAT_BITMAP implies the NAT
 * bitmap is stored after the SIT bitmap.
 * NOTE(review): '&ckpt->sit_nat_version_bitmap + offset' only advances by
 * |offset| bytes if that member is a one-byte-element array — confirm the
 * on-disk struct definition.
 */
static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) {
  struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
  int offset = (flag == NAT_BITMAP) ? ckpt->sit_ver_bitmap_bytesize : 0;
  return &ckpt->sit_nat_version_bitmap + offset;
}
/*
 * Computes the start block address of the checkpoint pack holding the
 * current checkpoint. Two packs alternate based on checkpoint version.
 */
static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) {
  block_t start_addr;
  struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
  unsigned long long ckpt_version = LeToCpu(ckpt->checkpoint_ver);
  start_addr = LeToCpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
  /*
   * An odd-numbered checkpoint should be at cp segment 0,
   * and an even-numbered one at cp segment 1.
   */
  if (!(ckpt_version & 1))
    start_addr += sbi->blocks_per_seg;
  return start_addr;
}
/* Returns the checkpoint's cp_pack_start_sum field: the block offset of the
 * summary area within the checkpoint pack. */
[[maybe_unused]] static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) {
  return LeToCpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}
/* Returns the valid node block count, read under stat_lock. */
[[maybe_unused]] static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) {
  unsigned int ret;
  SpinLock(&sbi->stat_lock);
  ret = sbi->total_valid_node_count;
  SpinUnlock(&sbi->stat_lock);
  return ret;
}
/* Increments the valid inode count under stat_lock.
 * The overflow sanity check is still a commented-out stub. */
static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) {
  SpinLock(&sbi->stat_lock);
  // TODO: IMPL
  // BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
  sbi->total_valid_inode_count++;
  SpinUnlock(&sbi->stat_lock);
}
/* Decrements the valid inode count under stat_lock; always returns 0.
 * The underflow sanity check is still a commented-out stub. */
[[maybe_unused]] static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi) {
  SpinLock(&sbi->stat_lock);
  // TODO: IMPL
  // BUG_ON(!sbi->total_valid_inode_count);
  sbi->total_valid_inode_count--;
  SpinUnlock(&sbi->stat_lock);
  return 0;
}
/* Returns the valid inode count, read under stat_lock. */
[[maybe_unused]] static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi) {
  unsigned int ret;
  SpinLock(&sbi->stat_lock);
  ret = sbi->total_valid_inode_count;
  SpinUnlock(&sbi->stat_lock);
  return ret;
}
/*
 * Releases |page|. NOTE(review): |unlock| is currently unused in this port —
 * presumably kept for signature parity with callers ported from Linux;
 * confirm before removing.
 */
inline void F2fsPutPage(struct Page *page, int unlock) {
  // `delete` on a null pointer is a well-defined no-op, so no null check
  // is needed.
  delete page;
}
/*
 * Drops the page references held by |dn| after a node lookup and resets the
 * pointers. When the inode page doubles as the node page (they alias), the
 * inode page is skipped to avoid a double free.
 */
static inline void F2fsPutDnode(struct dnode_of_data *dn) {
  // TODO: IMPL
  if (dn->node_page)
    F2fsPutPage(dn->node_page, 1);
  if (dn->inode_page && dn->node_page != dn->inode_page)
    F2fsPutPage(dn->inode_page, 0);
  // Use nullptr, consistent with the rest of this file, instead of NULL.
  dn->node_page = nullptr;
  dn->inode_page = nullptr;
}
/* Stub for the Linux slab-cache constructor; always returns nullptr in this
 * port. All parameters are ignored. */
[[maybe_unused]] static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
                                                                         size_t size,
                                                                         void (*ctor)(void *)) {
  return nullptr;
}
#define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
/* A node page holds an inode when its footer's nid equals its ino
 * (see RAW_IS_INODE above). */
static inline bool IS_INODE(struct Page *page) {
  struct f2fs_node *p = (struct f2fs_node *)PageAddress(page);
  return RAW_IS_INODE(p);
}
/* Returns the data block address array of |node|: i_addr for an inode block,
 * dn.addr for a direct node block. */
static inline uint32_t *blkaddr_in_node(struct f2fs_node *node) {
  return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}
/* Reads the data block address stored at |offset| within |node_page|,
 * converting from on-disk little-endian. */
static inline block_t datablock_addr(Page *node_page, unsigned int offset) {
  struct f2fs_node *raw_node;
  uint32_t *addr_array;
  raw_node = (struct f2fs_node *)PageAddress(node_page);
  addr_array = blkaddr_in_node(raw_node);
  return LeToCpu(addr_array[offset]);
}
/*
 * Tests bit |nr| of the bitmap at |addr|.
 * Bits are numbered MSB-first within each byte (bit 0 is mask 0x80).
 * Returns nonzero (the mask value) when set, 0 when clear.
 */
static inline int f2fs_test_bit(unsigned int nr, char *addr) {
  const char *byte = addr + (nr >> 3);
  const int mask = 1 << (7 - (nr & 0x07));
  return *byte & mask;
}
/*
 * Sets bit |nr| of the bitmap at |addr| (MSB-first within each byte).
 * Returns the bit's previous state: nonzero if it was already set, else 0.
 */
static inline int f2fs_set_bit(unsigned int nr, char *addr) {
  char *byte = addr + (nr >> 3);
  const int mask = 1 << (7 - (nr & 0x07));
  const int old = *byte & mask;
  *byte |= mask;
  return old;
}
/*
 * Clears bit |nr| of the bitmap at |addr| (MSB-first within each byte).
 * Returns the bit's previous state: nonzero if it was set, else 0.
 */
static inline int f2fs_clear_bit(unsigned int nr, char *addr) {
  char *byte = addr + (nr >> 3);
  const int mask = 1 << (7 - (nr & 0x07));
  const int old = *byte & mask;
  *byte &= ~mask;
  return old;
}
/* used for f2fs_inode_info->flags */
/* Bit positions used in f2fs_inode_info::flags. */
enum {
  FI_NEW_INODE, /* indicate newly allocated inode */
  FI_NEED_CP,   /* need to do checkpoint during fsync */
  FI_INC_LINK,  /* need to increment i_nlink */
  FI_ACL_MODE,  /* indicate acl mode */
  FI_NO_ALLOC,  /* should not allocate any blocks */
};
static inline void SetInodeFlag(struct f2fs_inode_info *fi, int flag) { set_bit(flag, &fi->flags); }
/* Returns nonzero when per-file flag |flag| (an FI_* value) is set. */
static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag) {
  return test_bit(flag, &fi->flags);
}
/* Clears per-file flag |flag| (an FI_* value) in fi->flags. */
static inline void ClearInodeFlag(struct f2fs_inode_info *fi, int flag) {
  clear_bit(flag, &fi->flags);
}
/* Stashes |mode| in i_acl_mode and marks FI_ACL_MODE so the pending ACL mode
 * change can be applied later (see CondClearInodeFlag). */
[[maybe_unused]] static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode) {
  fi->i_acl_mode = mode;
  SetInodeFlag(fi, FI_ACL_MODE);
}
/*
 * Clears FI_ACL_MODE if it is set and returns 1 if it was, 0 otherwise.
 * NOTE(review): the |flag| parameter is ignored — the body hard-codes
 * FI_ACL_MODE. Confirm whether any caller passes a different flag before
 * generalizing or removing the parameter.
 */
[[maybe_unused]] static inline int CondClearInodeFlag(struct f2fs_inode_info *fi, int flag) {
  if (is_inode_flag_set(fi, FI_ACL_MODE)) {
    ClearInodeFlag(fi, FI_ACL_MODE);
    return 1;
  }
  return 0;
}
/*
* gc.c
*/
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
block_t StartBidxOfNode(unsigned int);
int f2fs_gc(struct f2fs_sb_info *, int);
void build_gc_manager(struct f2fs_sb_info *);
int create_gc_caches(void);
void destroy_gc_caches(void);
/*
* debug.c
*/
#ifdef CONFIG_F2FS_STAT_FS
/* In-memory filesystem statistics snapshot (CONFIG_F2FS_STAT_FS only). */
struct f2fs_stat_info {
  list_node_t stat_list;    /* link in the global stat list */
  struct f2fs_sb_info *sbi; /* owning superblock */
  mtx_t stat_lock;          /* protects this snapshot */
  int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
  int main_area_segs, main_area_sections, main_area_zones;
  int hit_ext, total_ext; /* extent cache hits / lookups */
  int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
  int nats, sits, fnids;
  int total_count, utilization;
  int bg_gc; /* background gc calls */
  unsigned int valid_count, valid_node_count, valid_inode_count;
  unsigned int bimodal, avg_vblocks;
  int util_free, util_valid, util_invalid;
  int rsvd_segs, overp_segs;
  int dirty_count, node_pages, meta_pages;
  int prefree_count, call_count;
  int tot_segs, node_segs, data_segs, free_segs, free_secs;
  int tot_blks, data_blks, node_blks;
  int curseg[NR_CURSEG_TYPE];  /* current segment # per log type */
  int cursec[NR_CURSEG_TYPE];  /* current section # per log type */
  int curzone[NR_CURSEG_TYPE]; /* current zone # per log type */
  unsigned int segment_count[2]; /* # of allocated segments (LFS / SSR) */
  unsigned int block_count[2];   /* # of allocated blocks (LFS / SSR) */
  unsigned base_mem, cache_mem;  /* memory footprint estimates */
};
/* Statistics update helpers (CONFIG_F2FS_STAT_FS build only). Arguments are
 * parenthesized for macro hygiene, and multi-statement bodies keep the
 * do { } while (0) form so they behave as single statements. */
#define stat_inc_call_count(si) ((si)->call_count++)
#define stat_inc_seg_count(sbi, type)             \
  do {                                            \
    struct f2fs_stat_info *si = (sbi)->stat_info; \
    si->tot_segs++;                               \
    if ((type) == SUM_TYPE_DATA)                  \
      si->data_segs++;                            \
    else                                          \
      si->node_segs++;                            \
  } while (0)
#define stat_inc_tot_blk_count(si, blks) ((si)->tot_blks += (blks))
#define stat_inc_data_blk_count(sbi, blks)        \
  do {                                            \
    struct f2fs_stat_info *si = (sbi)->stat_info; \
    stat_inc_tot_blk_count(si, blks);             \
    si->data_blks += (blks);                      \
  } while (0)
#define stat_inc_node_blk_count(sbi, blks)        \
  do {                                            \
    struct f2fs_stat_info *si = (sbi)->stat_info; \
    stat_inc_tot_blk_count(si, blks);             \
    si->node_blks += (blks);                      \
  } while (0)
int f2fs_build_stats(struct f2fs_sb_info *);
void f2fs_destroy_stats(struct f2fs_sb_info *);
void destroy_root_stats(void);
#else
#define stat_inc_call_count(si)
#define stat_inc_seg_count(si, type)
#define stat_inc_tot_blk_count(si, blks)
#define stat_inc_data_blk_count(si, blks)
#define stat_inc_node_blk_count(sbi, blks)
static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) {}
[[maybe_unused]] static inline void destroy_root_stats(void) {}
#endif
} // namespace f2fs
#endif // F2FS_INTERNAL_H_