// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "f2fs.h"
#include <typeinfo> // operator typeid
namespace f2fs {
/**
* We guarantee no failure on the returned page.
*/
Page *F2fs::GrabMetaPage(pgoff_t index) {
Page *page;
repeat:
page = grab_cache_page(nullptr, F2FS_META_INO(sbi_), index);
if (!page) {
#if 0 // porting needed
// cond_resched();
#endif
goto repeat;
}
/* We wait for writeback only inside GrabMetaPage() */
wait_on_page_writeback(page);
SetPageUptodate(page);
return page;
}
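/**
 * Read the meta page at the given index from disk. Retries until both the
 * page allocation and the synchronous read succeed, so the caller always
 * gets a valid page.
 */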
Page *F2fs::GetMetaPage(pgoff_t index) {
Page *page;
repeat:
page = grab_cache_page(nullptr, F2FS_META_INO(sbi_), index);
if (!page) {
#if 0 // porting needed
// cond_resched();
#endif
goto repeat;
}
if (VnodeF2fs::F2fsReadpage(this, (struct Page *)page_address(page), index, READ_SYNC)) {
F2fsPutPage(page, 1);
goto repeat;
}
#if 0 // porting needed
// mark_page_accessed(page);
#endif
/* We do not allow returning an erroneous page */
return page;
}
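/**
 * Write back a single meta page through the segment manager and decrement
 * the dirty meta page count.
 */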
int F2fs::F2fsWriteMetaPage(Page *page, struct writeback_control *wbc) {
int err;
wait_on_page_writeback(page);
err = this->Segmgr().WriteMetaPage(page, wbc);
if (err) {
#if 0 // porting needed
// wbc->pages_skipped++;
// set_page_dirty(page, this);
#else
FlushDirtyMetaPage(this, page);
#endif
}
DecPageCount(&SbInfo(), F2FS_DIRTY_META);
/* In this case, we should not unlock this page */
#if 0 // porting needed
// if (err != AOP_WRITEPAGE_ACTIVATE)
// unlock_page(page);
#endif
return err;
}
#if 0 // porting needed
// int F2fs::F2fsWriteMetaPages(struct address_space *mapping, struct writeback_control *wbc) {
// struct block_device *bdev = sbi_->sb->s_bdev;
// long written;
// if (wbc->for_kupdate)
// return 0;
// if (get_pages(sbi_, F2FS_DIRTY_META) == 0)
// return 0;
// /* if mounting is failed, skip writing node pages */
// mtx_lock(&sbi_->cp_mutex);
// written = sync_meta_pages(sbi_.get(), META, bio_get_nr_vecs(bdev));
// mtx_unlock(&sbi_->cp_mutex);
// wbc->nr_to_write -= written;
// return 0;
// }
#endif
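/**
 * Write back dirty meta pages of the given type. The page cache walk has
 * not been ported yet, so this currently does nothing and returns 0.
 */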
long F2fs::SyncMetaPages(enum page_type type, long nr_to_write) {
#if 0 // porting needed
// struct address_space *mapping = sbi->meta_inode->i_mapping;
// pgoff_t index = 0, end = LONG_MAX;
// struct pagevec pvec;
// long nwritten = 0;
// struct writeback_control wbc = {
// .for_reclaim = 0,
// };
// pagevec_init(&pvec, 0);
// while (index <= end) {
// int i, nr_pages;
// nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
// PAGECACHE_TAG_DIRTY,
// min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
// if (nr_pages == 0)
// break;
// for (i = 0; i < nr_pages; i++) {
// struct page *page = pvec.pages[i];
// lock_page(page);
// BUG_ON(page->mapping != mapping);
// BUG_ON(!PageDirty(page));
// clear_page_dirty_for_io(page);
// f2fs_write_meta_page(page, &wbc);
// if (nwritten++ >= nr_to_write)
// break;
// }
// pagevec_release(&pvec);
// cond_resched();
// }
// if (nwritten)
// f2fs_submit_bio(sbi, type, nr_to_write == LONG_MAX);
// return nwritten;
#else
return 0;
#endif
}
#if 0 // porting needed
// int F2fs::F2fsSetMetaPageDirty(Page *page) {
// SetPageUptodate(page);
// if (!PageDirty(page)) {
// // __set_page_dirty_nobuffers(page);
// FlushDirtyMetaPage(this, page);
// inc_page_count(&SbInfo(), F2FS_DIRTY_META);
// F2FS_SET_SB_DIRT(&SbInfo());
// return 1;
// }
// return 0;
// }
#endif
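/**
 * Check whether there is room for one more orphan inode entry in the
 * checkpoint pack. Returns -ENOSPC when the orphan limit has been reached.
 */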
int F2fs::CheckOrphanSpace() {
f2fs_sb_info &sbi = SbInfo();
unsigned int max_orphans;
int err = 0;
/*
 * Considering 512 blocks in a segment, 5 blocks are needed for the cp block
 * and the log segment summaries. The remaining blocks are used to keep
 * orphan entries. With the limitation of one reserved segment for the cp
 * pack, we can have at most 507 * 1020 orphan entries.
 */
max_orphans = (sbi.blocks_per_seg - 5) * F2FS_ORPHANS_PER_BLOCK;
mtx_lock(&sbi.orphan_inode_mutex);
if (sbi.n_orphans >= max_orphans)
err = -ENOSPC;
mtx_unlock(&sbi.orphan_inode_mutex);
return err;
}
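/**
 * Record ino as an orphan inode. The entry is inserted into the orphan
 * inode list in inode-number order; duplicates are ignored.
 */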
void F2fs::AddOrphanInode(nid_t ino) {
f2fs_sb_info &sbi = SbInfo();
list_node_t *head, *this_node;
struct orphan_inode_entry *new_entry = NULL, *orphan = NULL;
mtx_lock(&sbi.orphan_inode_mutex);
head = &sbi.orphan_inode_list;
list_for_every(head, this_node) {
orphan = containerof(this_node, struct orphan_inode_entry, list);
if (orphan->ino == ino)
goto out;
if (orphan->ino > ino)
break;
orphan = NULL;
}
retry:
#if 0 // porting needed
// new_entry = kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
#else
new_entry = new orphan_inode_entry;
#endif
if (!new_entry) {
#if 0 // porting needed
// cond_resched();
#endif
goto retry;
}
new_entry->ino = ino;
list_initialize(&new_entry->list);
/* add new_entry into the list, which is sorted by inode number */
if (orphan) {
struct orphan_inode_entry *prev;
/* get previous entry */
prev = containerof(orphan->list.prev, struct orphan_inode_entry, list);
if (&prev->list != head)
/* insert new orphan inode entry */
list_add(&prev->list, &new_entry->list);
else
list_add(head, &new_entry->list);
} else {
list_add_tail(head, &new_entry->list);
}
sbi.n_orphans++;
out:
mtx_unlock(&sbi.orphan_inode_mutex);
}
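/**
 * Remove ino from the orphan inode list, if present, and free its entry.
 */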
void F2fs::RemoveOrphanInode(nid_t ino) {
f2fs_sb_info &sbi = SbInfo();
list_node_t *this_node, *next, *head;
struct orphan_inode_entry *orphan;
mtx_lock(&sbi.orphan_inode_mutex);
head = &sbi.orphan_inode_list;
list_for_every_safe(head, this_node, next) {
orphan = containerof(this_node, struct orphan_inode_entry, list);
if (orphan->ino == ino) {
list_delete(&orphan->list);
#if 0 // porting needed
// kmem_cache_free(orphan_entry_slab, orphan);
#endif
delete orphan;
sbi.n_orphans--;
break;
}
}
mtx_unlock(&sbi.orphan_inode_mutex);
}
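/**
 * Recover a single orphan inode: fetch its vnode, clear the link count,
 * and release it so that its data is truncated and the inode is freed.
 */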
void F2fs::RecoverOrphanInode(nid_t ino) {
fbl::RefPtr<VnodeF2fs> vnode;
zx_status_t ret;
ret = VnodeF2fs::F2fsVget(this, ino, &vnode);
ZX_ASSERT(ret == ZX_OK);
vnode->ClearNlink();
/* truncate all the data during iput */
iput(vnode.get());
vnode.reset();
}
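/**
 * During recovery, walk the orphan blocks recorded in the current checkpoint
 * pack and recover every orphan inode, then clear the orphan flag.
 */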
int F2fs::RecoverOrphanInodes() {
f2fs_sb_info &sbi = SbInfo();
block_t start_blk, orphan_blkaddr, i, j;
if (!(F2FS_CKPT(&sbi)->ckpt_flags & CP_ORPHAN_PRESENT_FLAG))
return 0;
sbi.por_doing = 1;
start_blk = __start_cp_addr(&sbi) + 1;
orphan_blkaddr = __start_sum_addr(&sbi) - 1;
for (i = 0; i < orphan_blkaddr; i++) {
Page *page = GetMetaPage(start_blk + i);
struct f2fs_orphan_block *orphan_blk;
orphan_blk = (struct f2fs_orphan_block *)page_address(page);
for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
RecoverOrphanInode(ino);
}
F2fsPutPage(page, 1);
}
/* clear Orphan Flag */
F2FS_CKPT(&sbi)->ckpt_flags &= (~CP_ORPHAN_PRESENT_FLAG);
sbi.por_doing = 0;
return 0;
}
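/**
 * Write all in-memory orphan inode entries into orphan blocks starting at
 * start_blk, packing up to F2FS_ORPHANS_PER_BLOCK entries per block.
 */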
void F2fs::WriteOrphanInodes(block_t start_blk) {
f2fs_sb_info &sbi = SbInfo();
list_node_t *head, *this_node, *next;
struct f2fs_orphan_block *orphan_blk = NULL;
Page *page = NULL;
unsigned int nentries = 0;
unsigned short index = 1;
unsigned short orphan_blocks;
orphan_blocks =
(unsigned short)((sbi.n_orphans + (F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK);
mtx_lock(&sbi.orphan_inode_mutex);
head = &sbi.orphan_inode_list;
/* loop over each orphan inode entry and write them into the orphan blocks */
list_for_every_safe(head, this_node, next) {
struct orphan_inode_entry *orphan;
orphan = containerof(this_node, struct orphan_inode_entry, list);
if (nentries == F2FS_ORPHANS_PER_BLOCK) {
/*
 * The current orphan block is full of 1020 entries,
 * so we need to flush it and bring another one into memory.
 */
orphan_blk->blk_addr = cpu_to_le16(index);
orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
orphan_blk->entry_count = cpu_to_le32(nentries);
#if 0 // porting needed
// set_page_dirty(page, this);
#else
FlushDirtyMetaPage(this, page);
#endif
F2fsPutPage(page, 1);
index++;
start_blk++;
nentries = 0;
page = NULL;
}
if (page)
goto page_exist;
page = GrabMetaPage(start_blk);
orphan_blk = (struct f2fs_orphan_block *)page_address(page);
memset(orphan_blk, 0, sizeof(*orphan_blk));
page_exist:
orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);
}
if (!page)
goto end;
orphan_blk->blk_addr = cpu_to_le16(index);
orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
orphan_blk->entry_count = cpu_to_le32(nentries);
#if 0 // porting needed
// set_page_dirty(page, this);
#else
FlushDirtyMetaPage(this, page);
#endif
F2fsPutPage(page, 1);
end:
mtx_unlock(&sbi.orphan_inode_mutex);
}
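/**
 * Validate the checkpoint pack starting at cp_addr by checking the CRCs of
 * its first and last blocks and verifying that their version numbers match.
 * Returns the first cp page on success, nullptr otherwise.
 */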
Page *F2fs::ValidateCheckpoint(block_t cp_addr, unsigned long long *version) {
Page *cp_page_1, *cp_page_2;
unsigned long blk_size = sbi_->blocksize;
struct f2fs_checkpoint *cp_block;
unsigned long long cur_version = 0, pre_version = 0;
unsigned int crc = 0;
size_t crc_offset;
/* Read the 1st cp block in this CP pack */
cp_page_1 = GetMetaPage(cp_addr);
/* get the version number */
cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
crc_offset = le32_to_cpu(cp_block->checksum_offset);
if (crc_offset >= blk_size)
goto invalid_cp1;
crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
if (!f2fs_crc_valid(crc, cp_block, crc_offset))
goto invalid_cp1;
pre_version = le64_to_cpu(cp_block->checkpoint_ver);
/* Read the 2nd cp block in this CP pack */
cp_addr += le64_to_cpu(cp_block->cp_pack_total_block_count) - 1;
cp_page_2 = GetMetaPage(cp_addr);
cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
crc_offset = le32_to_cpu(cp_block->checksum_offset);
if (crc_offset >= blk_size)
goto invalid_cp2;
crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
if (!f2fs_crc_valid(crc, cp_block, crc_offset))
goto invalid_cp2;
cur_version = le64_to_cpu(cp_block->checkpoint_ver);
if (cur_version == pre_version) {
*version = cur_version;
F2fsPutPage(cp_page_2, 1);
return cp_page_1;
}
invalid_cp2:
F2fsPutPage(cp_page_2, 1);
invalid_cp1:
F2fsPutPage(cp_page_1, 1);
return nullptr;
}
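/**
 * Read both checkpoint packs, pick the valid one with the newer version,
 * and cache its checkpoint block in sbi_->ckpt.
 */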
zx_status_t F2fs::GetValidCheckpoint() {
f2fs_checkpoint *cp_block;
f2fs_super_block &fsb = RawSb();
Page *cp1, *cp2, *cur_page;
unsigned long blk_size = sbi_->blocksize;
unsigned long long cp1_version = 0, cp2_version = 0;
unsigned long long cp_start_blk_no;
sbi_->ckpt = (f2fs_checkpoint *)malloc(blk_size);
if (!sbi_->ckpt)
return -ENOMEM;
/*
 * Finding a valid cp block involves reading both
 * sets (cp pack 1 and cp pack 2).
 */
cp_start_blk_no = le32_to_cpu(fsb.cp_blkaddr);
cp1 = ValidateCheckpoint(cp_start_blk_no, &cp1_version);
/* The second checkpoint pack should start at the next segment */
cp_start_blk_no += 1 << le32_to_cpu(fsb.log_blocks_per_seg);
cp2 = ValidateCheckpoint(cp_start_blk_no, &cp2_version);
if (cp1 && cp2) {
if (ver_after(cp2_version, cp1_version))
cur_page = cp2;
else
cur_page = cp1;
} else if (cp1) {
cur_page = cp1;
} else if (cp2) {
cur_page = cp2;
} else {
goto fail_no_cp;
}
cp_block = (f2fs_checkpoint *)page_address(cur_page);
memcpy(sbi_->ckpt, cp_block, blk_size);
#ifdef F2FS_BU_DEBUG
int i;
std::cout << std::endl << "F2fs::GetValidCheckpoint" << std::endl;
for(i = 0; i < MAX_ACTIVE_NODE_LOGS; i++) {
std::cout << "[" << i << "] cur_node_segno "<< cp_block->cur_node_segno[i]
<< ", cur_node_blkoff="
<< cp_block->cur_node_blkoff[i]
<< std::endl;
}
for (i = 0; i < MAX_ACTIVE_DATA_LOGS; i++) {
std::cout << "[" << i << "] cur_data_segno "<< cp_block->cur_data_segno[i]
<< ", cur_data_blkoff="
<< cp_block->cur_data_blkoff[i]
<< std::endl;
}
#endif
F2fsPutPage(cp1, 1);
F2fsPutPage(cp2, 1);
return 0;
fail_no_cp:
free(sbi_->ckpt);
return -EINVAL;
}
#if 0 // porting needed
// void F2fs::SetDirtyDirPage(VnodeF2fs *vnode, Page *page) {
// f2fs_sb_info &sbi = SbInfo();
// list_node_t *head = &sbi.dir_inode_list;
// struct dir_inode_entry *new_entry;
// list_node_t *this_node;
// if (!S_ISDIR(vnode->i_mode))
// return;
// retry:
// // new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
// new_entry = new dir_inode_entry;
// if (!new_entry) {
// // cond_resched();
// goto retry;
// }
// new_entry->vnode = vnode;
// list_initialize(&new_entry->list);
// spin_lock(&sbi.dir_inode_lock);
// list_for_every(head, this_node) {
// struct dir_inode_entry *entry;
// entry = containerof(this_node, struct dir_inode_entry, list);
// if (entry->vnode == vnode) {
// // kmem_cache_free(inode_entry_slab, new_entry);
// delete new_entry;
// goto out;
// }
// }
// list_add_tail(&new_entry->list, head);
// sbi.n_dirty_dirs++;
// BUG_ON(!S_ISDIR(inode->i_mode));
// out:
// inc_page_count(&sbi, F2FS_DIRTY_DENTS);
// InodeIncDirtyDents(vnode);
// // SetPagePrivate(page);
// spin_unlock(&sbi.dir_inode_lock);
// }
// void F2fs::RemoveDirtyDirInode(VnodeF2fs *vnode) {
// f2fs_sb_info &sbi = SbInfo();
// list_node_t *head = &sbi.dir_inode_list;
// list_node_t *this_node;
// if (!S_ISDIR(vnode->i_mode))
// return;
// spin_lock(&sbi.dir_inode_lock);
// // if (atomic_read(&F2FS_I(vnode)->dirty_dents))
// if (vnode->fi.dirty_dents)
// goto out;
// list_for_every(head, this_node) {
// struct dir_inode_entry *entry;
// entry = containerof(this_node, struct dir_inode_entry, list);
// if (entry->vnode == vnode) {
// list_delete(&entry->list);
// // kmem_cache_free(inode_entry_slab, entry);
// delete entry;
// sbi.n_dirty_dirs--;
// break;
// }
// }
// out:
// spin_unlock(&sbi.dir_inode_lock);
// }
#endif
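/**
 * Drain the dirty directory inode list, releasing each inode in turn until
 * the list becomes empty (the actual page flush is not yet ported).
 */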
void F2fs::SyncDirtyDirInodes() {
f2fs_sb_info &sbi = SbInfo();
list_node_t *head = &sbi.dir_inode_list;
struct dir_inode_entry *entry;
fbl::RefPtr<VnodeF2fs> vnode;
retry:
spin_lock(&sbi.dir_inode_lock);
if (list_is_empty(head)) {
spin_unlock(&sbi.dir_inode_lock);
return;
}
entry = containerof(head->next, struct dir_inode_entry, list);
vnode.reset((VnodeF2fs *)igrab(entry->vnode));
spin_unlock(&sbi.dir_inode_lock);
if (vnode) {
#if 0 // porting needed
// filemap_flush(vnode->i_mapping);
#endif
iput(vnode.get());
vnode.reset();
} else {
/*
 * We should submit the bio, since several dentry pages are
 * still under writeback in the inode being freed.
 */
// TODO(unknown): bio[type] is empty
// Segmgr().F2fsSubmitBio(DATA, true);
}
goto retry;
}
/**
* Freeze all the FS-operations for checkpoint.
*/
void F2fs::BlockOperations() TA_NO_THREAD_SAFETY_ANALYSIS {
f2fs_sb_info &sbi = SbInfo();
int t;
struct writeback_control wbc = {
#if 0 // porting needed
// .nr_to_write = LONG_MAX,
// .sync_mode = WB_SYNC_ALL,
// .for_reclaim = 0,
#endif
};
/* Stop renaming operation */
mutex_lock_op(&sbi, RENAME);
mutex_lock_op(&sbi, DENTRY_OPS);
retry_dents:
/* write all the dirty dentry pages */
SyncDirtyDirInodes();
mutex_lock_op(&sbi, DATA_WRITE);
if (get_pages(&sbi, F2FS_DIRTY_DENTS)) {
mutex_unlock_op(&sbi, DATA_WRITE);
goto retry_dents;
}
/* block all the operations */
for (t = DATA_NEW; t <= NODE_TRUNC; t++)
mutex_lock_op(&sbi, (lock_type)t);
mtx_lock(&sbi.write_inode);
/*
 * POR: we should ensure that there are no dirty node pages
 * until the nat/sit flush is finished.
 */
retry:
Nodemgr().SyncNodePages(0, &wbc);
mutex_lock_op(&sbi, NODE_WRITE);
if (get_pages(&sbi, F2FS_DIRTY_NODES)) {
mutex_unlock_op(&sbi, NODE_WRITE);
goto retry;
}
mtx_unlock(&sbi.write_inode);
}
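/**
 * Release all the fs operation locks taken by BlockOperations().
 */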
void F2fs::UnblockOperations() TA_NO_THREAD_SAFETY_ANALYSIS {
f2fs_sb_info &sbi = SbInfo();
int t;
for (t = NODE_WRITE; t >= RENAME; t--)
mutex_unlock_op(&sbi, (lock_type)t);
}
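/**
 * Build and write a checkpoint pack: flush dirty meta pages, fill in the
 * checkpoint block, write orphan blocks and segment summaries, and finish
 * with the trailing checkpoint block.
 */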
void F2fs::DoCheckpoint(bool is_umount) {
f2fs_sb_info &sbi = SbInfo();
struct f2fs_checkpoint *ckpt = F2FS_CKPT(&sbi);
nid_t last_nid = 0;
block_t start_blk;
Page *cp_page;
unsigned int data_sum_blocks, orphan_blocks;
void *kaddr;
__u32 crc32 = 0;
int i;
/* Flush all the NAT/SIT pages */
while (get_pages(&sbi, F2FS_DIRTY_META))
SyncMetaPages(META, LONG_MAX);
Nodemgr().NextFreeNid(&last_nid);
/*
 * Modify the checkpoint.
 * The version number has already been updated.
 */
ckpt->elapsed_time = cpu_to_le64(Segmgr().GetMtime());
ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(&sbi));
ckpt->free_segment_count = cpu_to_le32(Segmgr().FreeSegments());
for (i = 0; i < 3; i++) {
ckpt->cur_node_segno[i] = cpu_to_le32(Segmgr().CursegSegno(i + CURSEG_HOT_NODE));
ckpt->cur_node_blkoff[i] = cpu_to_le16(Segmgr().CursegBlkoff(i + CURSEG_HOT_NODE));
ckpt->alloc_type[i + CURSEG_HOT_NODE] = Segmgr().CursegAllocType(i + CURSEG_HOT_NODE);
}
for (i = 0; i < 3; i++) {
ckpt->cur_data_segno[i] = cpu_to_le32(Segmgr().CursegSegno(i + CURSEG_HOT_DATA));
ckpt->cur_data_blkoff[i] = cpu_to_le16(Segmgr().CursegBlkoff(i + CURSEG_HOT_DATA));
ckpt->alloc_type[i + CURSEG_HOT_DATA] = Segmgr().CursegAllocType(i + CURSEG_HOT_DATA);
#ifdef F2FS_BU_DEBUG
std::cout << std::endl << "F2fs::DoCheckpoint " << std::endl;
std::cout << "[" << i << "] cur_data_segno "<< ckpt->cur_data_segno[i]
<< ", cur_data_blkoff=" << ckpt->cur_data_blkoff[i]
<< std::endl;
std::cout << "[" << i << "] cur_node_segno "<< ckpt->cur_node_segno[i]
<< ", cur_node_blkoff=" << ckpt->cur_node_blkoff[i]
<< std::endl;
#endif
}
ckpt->valid_node_count = cpu_to_le32(valid_node_count(&sbi));
ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(&sbi));
ckpt->next_free_nid = cpu_to_le32(last_nid);
/* 2 cp + n data seg summary + orphan inode blocks */
data_sum_blocks = Segmgr().NpagesForSummaryFlush();
if (data_sum_blocks < 3)
ckpt->ckpt_flags |= CP_COMPACT_SUM_FLAG;
else
ckpt->ckpt_flags &= (~CP_COMPACT_SUM_FLAG);
orphan_blocks = (sbi.n_orphans + F2FS_ORPHANS_PER_BLOCK - 1) / F2FS_ORPHANS_PER_BLOCK;
ckpt->cp_pack_start_sum = 1 + orphan_blocks;
ckpt->cp_pack_total_block_count = 2 + data_sum_blocks + orphan_blocks;
if (is_umount) {
ckpt->ckpt_flags |= CP_UMOUNT_FLAG;
ckpt->cp_pack_total_block_count += NR_CURSEG_NODE_TYPE;
} else {
ckpt->ckpt_flags &= (~CP_UMOUNT_FLAG);
}
if (sbi.n_orphans)
ckpt->ckpt_flags |= CP_ORPHAN_PRESENT_FLAG;
else
ckpt->ckpt_flags &= (~CP_ORPHAN_PRESENT_FLAG);
/* update SIT/NAT bitmap */
Segmgr().GetSitBitmap(__bitmap_ptr(&sbi, SIT_BITMAP));
Nodemgr().GetNatBitmap(__bitmap_ptr(&sbi, NAT_BITMAP));
crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
*(__u32 *)((unsigned char *)ckpt + le32_to_cpu(ckpt->checksum_offset)) = cpu_to_le32(crc32);
start_blk = __start_cp_addr(&sbi);
/* write out checkpoint buffer at block 0 */
cp_page = GrabMetaPage(start_blk++);
kaddr = page_address(cp_page);
memcpy(kaddr, ckpt, (1 << sbi.log_blocksize));
#if 0 // porting needed
// set_page_dirty(cp_page, this);
#else
FlushDirtyMetaPage(this, cp_page);
#endif
F2fsPutPage(cp_page, 1);
if (sbi.n_orphans) {
WriteOrphanInodes(start_blk);
start_blk += orphan_blocks;
}
Segmgr().WriteDataSummaries(start_blk);
start_blk += data_sum_blocks;
if (is_umount) {
Segmgr().WriteNodeSummaries(start_blk);
start_blk += NR_CURSEG_NODE_TYPE;
}
/* write out checkpoint block */
cp_page = GrabMetaPage(start_blk);
kaddr = page_address(cp_page);
memcpy(kaddr, ckpt, (1 << sbi.log_blocksize));
#if 0 // porting needed
// set_page_dirty(cp_page, this);
#else
FlushDirtyMetaPage(this, cp_page);
#endif
F2fsPutPage(cp_page, 1);
/* wait for writeback of previously submitted node/meta pages */
#if 0 // porting needed
// while (get_pages(&sbi, F2FS_WRITEBACK))
// congestion_wait(BLK_RW_ASYNC, HZ / 50);
// filemap_fdatawait_range(sbi.node_inode->i_mapping, 0, LONG_MAX);
// filemap_fdatawait_range(sbi.meta_inode->i_mapping, 0, LONG_MAX);
#endif
/* update user_block_counts */
sbi.last_valid_block_count = sbi.total_valid_block_count;
sbi.alloc_valid_block_count = 0;
/* Here, we have only one bio containing the CP pack */
#if 0 // porting needed
// if (sbi.ckpt->ckpt_flags & CP_ERROR_FLAG)
// sbi->sb->s_flags |= MS_RDONLY;
// else
#endif
SyncMetaPages(META_FLUSH, LONG_MAX);
Segmgr().ClearPrefreeSegments();
F2FS_RESET_SB_DIRT(&sbi);
}
/**
 * We guarantee that this checkpoint procedure will not fail.
 */
void F2fs::WriteCheckpoint(bool blocked, bool is_umount) {
f2fs_sb_info &sbi = SbInfo();
struct f2fs_checkpoint *ckpt = F2FS_CKPT(&sbi);
unsigned long long ckpt_ver;
// TODO(unknown): Need to confirm if blocked is true
// if (!blocked) {
mtx_lock(&sbi.cp_mutex);
BlockOperations();
//}
#if 0 // porting needed (bio[type] is empty)
// Segmgr().F2fsSubmitBio(DATA, true);
// Segmgr().F2fsSubmitBio(NODE, true);
// Segmgr().F2fsSubmitBio(META, true);
#endif
/*
 * Update the checkpoint pack index.
 * Increase the version number so that
 * SIT entries and seg summaries are written at the correct place.
 */
ckpt_ver = le64_to_cpu(ckpt->checkpoint_ver);
ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);
/* write cached NAT/SIT entries to NAT/SIT area */
Nodemgr().FlushNatEntries();
Segmgr().FlushSitEntries();
Segmgr().ResetVictimSegmap();
/* unlock all the fs_lock[] in do_checkpoint() */
DoCheckpoint(is_umount);
UnblockOperations();
mtx_unlock(&sbi.cp_mutex);
}
void F2fs::InitOrphanInfo() {
f2fs_sb_info &sbi = SbInfo();
mtx_init(&sbi.orphan_inode_mutex, mtx_plain);
list_initialize(&sbi.orphan_inode_list);
sbi.n_orphans = 0;
}
#if 0 // porting needed
// int F2fs::CreateCheckpointCaches() {
// orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
// sizeof(struct orphan_inode_entry), NULL);
// if (unlikely(!orphan_entry_slab))
// return -ENOMEM;
// inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
// sizeof(struct dir_inode_entry), NULL);
// if (unlikely(!inode_entry_slab)) {
// kmem_cache_destroy(orphan_entry_slab);
// return -ENOMEM;
// }
// return 0;
// }
// void F2fs::DestroyCheckpointCaches(void) {
// // kmem_cache_destroy(orphan_entry_slab);
// // kmem_cache_destroy(inode_entry_slab);
// }
#endif
} // namespace f2fs