// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef F2FS_LIB_H_
#define F2FS_LIB_H_

#include <stdint.h>   // uint32_t, uint64_t
#include <string.h>   // memset
#include <threads.h>  // mtx_t, mtx_init, mtx_lock, mtx_unlock
#include <unistd.h>   // getuid, getgid

#include <algorithm>  // std::min

#include <zircon/assert.h>    // ZX_ASSERT
#include <zircon/compiler.h>  // TA_ACQ / TA_REL thread-safety annotations
#include <zircon/listnode.h>  // list_node_t, list_add_tail, list_delete

// Page, spinlock_t, rwlock_t, atomic_t, and the C11-style atomic_* operations
// are assumed to be declared by the including translation unit (e.g., f2fs.h).

namespace f2fs {
#define F2FS_SUPER_MAGIC 0xF2F52010
#define PAGE_SIZE 4096
#define PAGE_CACHE_SIZE 4096
#define BITS_PER_BYTE 8
#define PAGE_CACHE_MASK (PAGE_SIZE - 1)
#define CRCPOLY_LE 0xedb88320
#define AOP_WRITEPAGE_ACTIVATE 0x80000
/*
 * Page cache helpers
 * TODO: Need to be changed once Pager is available
 */
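// Allocates a placeholder Page; the caller takes ownership and must delete it.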
inline Page *grab_cache_page(void *vnode, uint32_t nid, uint32_t /*TODO pgoff_t*/ index) {
  Page *page = new Page();
  page->index = index;
  page->host = vnode;
  page->host_nid = nid;
  return page;
}
inline void *page_address(Page *page) { return (void *)page->data; }
inline Page *find_get_page(/* TODO pgoff_t*/ uint32_t index) { return nullptr; }
inline int PageUptodate(struct Page *page) { return 0; }
inline void SetPageUptodate(struct Page *page) {}
inline void ClearPageUptodate(struct Page *page) {}
inline void ClearPagePrivate(struct Page *) {}
inline int PageDirty(struct Page *) {
  // TODO: IMPL: does Page have a dirty bit?
  return 0;
}
static inline int clear_page_dirty_for_io(Page *page) { return 0; }
struct writeback_control {};
struct vm_area_struct {};
struct vm_fault {};
static inline void set_page_writeback(Page *page) {
  // TODO: Once Pager is available, it could be used before VMO writeback
}
static inline void wait_on_page_writeback(Page *page) {
  // TODO: Once Pager is available, it could be used for wb synchronization
}
/*
* Checkpoint
*/
// Returns true when version |a| is newer than |b|; the unsigned subtraction
// keeps the comparison correct across version-counter wraparound.
inline bool ver_after(unsigned long long a, unsigned long long b) {
  return static_cast<long long>(a - b) > 0;
}
/*
* CRC
*/
// Bitwise little-endian CRC32 (polynomial CRCPOLY_LE) over |len| bytes of
// |buff|, processed one bit at a time.
inline unsigned int f2fs_cal_crc32(unsigned int crc, void *buff, unsigned int len) {
  unsigned char *p = static_cast<unsigned char *>(buff);
  while (len--) {
    crc ^= *p++;
    for (int i = 0; i < BITS_PER_BYTE; i++)
      crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
  }
  return crc;
}
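// f2fs seeds its checksums with the superblock magic number.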
inline uint32_t f2fs_crc32(void *buff, size_t len) {
  return f2fs_cal_crc32(F2FS_SUPER_MAGIC, buff, len);
}
inline bool f2fs_crc_valid(uint32_t blk_crc, void *buff, size_t buff_size) {
  return f2fs_crc32(buff, buff_size) == blk_crc;
}
/*
 * Error codes in pointer variables
 * TODO: Remove these; Fuchsia does not encode errno values in pointers.
 */
inline bool IS_ERR(const void *ptr) { return ptr == nullptr; }
inline long PTR_ERR(const void *ptr) { return 0; }
inline void *ERR_PTR(long error) { return nullptr; }
/*
 * Every lock uses a plain mutex for now, so read and write lock requests are
 * serialized alike.
 * TODO: Find a more appropriate primitive for rw locks.
 */
static inline void spin_lock_init(spinlock_t *lock) { mtx_init(lock, mtx_plain); }
static inline void spin_lock(spinlock_t *lock) TA_ACQ(lock) { mtx_lock(lock); }
static inline void spin_unlock(spinlock_t *lock) TA_REL(lock) { mtx_unlock(lock); }
static inline void rwlock_init(rwlock_t *lock) { mtx_init(lock, mtx_plain); }
static inline void _raw_read_lock(rwlock_t *lock) TA_ACQ(lock) { mtx_lock(lock); }
static inline void _raw_write_lock(rwlock_t *lock) TA_ACQ(lock) { mtx_lock(lock); }
#define read_lock(lock) _raw_read_lock(lock)
#define write_lock(lock) _raw_write_lock(lock)
static inline void _raw_read_unlock(rwlock_t *lock) TA_REL(lock) { mtx_unlock(lock); }
static inline void _raw_write_unlock(rwlock_t *lock) TA_REL(lock) { mtx_unlock(lock); }
#define read_unlock(lock) _raw_read_unlock(lock)
#define write_unlock(lock) _raw_write_unlock(lock)
/*
 * Bitmap operations
 * TODO: some operations (e.g., test_and_set) require atomicity
 */
#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
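// The helpers below treat the bitmap as an array of 64-bit words: bit |nr|
// maps to bit (nr % 64) of word (nr / 64).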
static inline void set_bit(int nr, unsigned long *addr) {
  unsigned long long *bitmap = (unsigned long long *)addr;
  unsigned long size_per_iter = sizeof(unsigned long long) * 8;
  unsigned long iter = nr / size_per_iter;
  unsigned long offset_in_iter = nr % size_per_iter;
  bitmap[iter] |= 1ULL << offset_in_iter;
}
static inline void clear_bit(int nr, unsigned long *addr) {
  unsigned long long *bitmap = (unsigned long long *)addr;
  unsigned long size_per_iter = sizeof(unsigned long long) * 8;
  unsigned long iter = nr / size_per_iter;
  unsigned long offset_in_iter = nr % size_per_iter;
  bitmap[iter] &= ~(1ULL << offset_in_iter);
}
static inline int test_bit(int nr, const volatile unsigned long *addr) {
  unsigned long long *bitmap = (unsigned long long *)addr;
  unsigned long size_per_iter = sizeof(unsigned long long) * 8;
  unsigned long iter = nr / size_per_iter;
  unsigned long offset_in_iter = nr % size_per_iter;
  return (bitmap[iter] & (1ULL << offset_in_iter)) >> offset_in_iter;
}
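// Finds the first zero bit in [offset, size) of a little-endian bitmap.
// Returns |size| when no zero bit exists in the range.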
static inline int find_next_zero_bit_le(void *vaddr, unsigned long size, unsigned long offset) {
  unsigned long long *bitmap = (unsigned long long *)vaddr;
  unsigned long size_per_iter = sizeof(unsigned long long) * 8;
  while (offset < size) {
    unsigned long iter = offset / size_per_iter;
    unsigned long offset_in_iter = offset % size_per_iter;
    unsigned long long mask = (~0ULL << offset_in_iter);
    unsigned long long res = bitmap[iter] & mask;
    if (res != mask) {  // A zero bit exists in this word.
      for (; offset_in_iter < size_per_iter; offset_in_iter++) {
        if ((bitmap[iter] & (1ULL << offset_in_iter)) == 0) {
          return std::min(iter * size_per_iter + offset_in_iter, size);
        }
      }
    }
    offset = (iter + 1) * size_per_iter;
  }
  return size;
}
static inline int find_next_zero_bit(unsigned long *vaddr, int size, int offset) {
  return find_next_zero_bit_le((void *)vaddr, (unsigned long)size, (unsigned long)offset);
}
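// Finds the first set bit in [offset, size) of a little-endian bitmap.
// Returns |size| when no set bit exists in the range.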
static inline int find_next_bit_le(void *vaddr, unsigned long size, unsigned long offset) {
  unsigned long long *bitmap = (unsigned long long *)vaddr;
  unsigned long size_per_iter = sizeof(unsigned long long) * 8;
  while (offset < size) {
    unsigned long iter = offset / size_per_iter;
    unsigned long offset_in_iter = offset % size_per_iter;
    unsigned long long mask = (~0ULL << offset_in_iter);
    unsigned long long res = bitmap[iter] & mask;
    if (res != 0) {  // A set bit exists in this word.
      for (; offset_in_iter < size_per_iter; offset_in_iter++) {
        if ((bitmap[iter] & (1ULL << offset_in_iter)) != 0) {
          return std::min(iter * size_per_iter + offset_in_iter, size);
        }
      }
    }
    offset = (iter + 1) * size_per_iter;
  }
  return size;
}
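// Note: the test_and_* helpers below are read-modify-write but not atomic
// (see the TODO above).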
static inline int test_and_set_bit_le(int nr, void *vaddr) {
  unsigned long long *bitmap = (unsigned long long *)vaddr;
  unsigned long size_per_iter = sizeof(unsigned long long) * 8;
  unsigned long iter = nr / size_per_iter;
  unsigned long offset_in_iter = nr % size_per_iter;
  int ret = (bitmap[iter] & (1ULL << offset_in_iter)) >> offset_in_iter;
  bitmap[iter] |= 1ULL << offset_in_iter;
  return ret;
}
static inline int test_and_set_bit(int nr, unsigned long *addr) {
  return test_and_set_bit_le(nr, addr);
}
static inline int test_and_clear_bit_le(int nr, void *addr) {
  unsigned long long *bitmap = (unsigned long long *)addr;
  unsigned long size_per_iter = sizeof(unsigned long long) * 8;
  unsigned long iter = nr / size_per_iter;
  unsigned long offset_in_iter = nr % size_per_iter;
  int ret = (bitmap[iter] & (1ULL << offset_in_iter)) >> offset_in_iter;
  bitmap[iter] &= ~(1ULL << offset_in_iter);
  return ret;
}
static inline int test_and_clear_bit(int nr, unsigned long *addr) {
  return test_and_clear_bit_le(nr, addr);
}
static inline void __clear_bit(int nr, volatile void *addr) {
  // Note: unlike the helpers above, this one operates on 32-bit words. The 1U
  // literal avoids undefined behavior when clearing bit 31.
  *((volatile uint32_t *)addr + (nr >> 5)) &= ~(1U << (nr & 31));
}
/*
* Atomic wrapper
*/
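// All wrappers use relaxed ordering: atomicity for simple counters, but no
// inter-thread synchronization guarantees.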
static inline void atomic_set(atomic_t *t, int value) {
  atomic_store_explicit(t, value, memory_order_relaxed);
}
static inline int atomic_read(atomic_t *t) {
  return atomic_load_explicit(t, memory_order_relaxed);
}
static inline void atomic_inc(atomic_t *t) {
  atomic_fetch_add_explicit(t, 1, memory_order_relaxed);
}
static inline void atomic_dec(atomic_t *t) {
  atomic_fetch_sub_explicit(t, 1, memory_order_relaxed);
}
/*
* List operations
*/
// Moves |item| to the tail of |list|.
inline void list_move_tail(list_node_t *list, list_node_t *item) {
  list_delete(item);
  list_add_tail(list, item);
}
// Inserts |item| immediately after the |list| head.
static inline void list_add(list_node_t *list, list_node_t *item) {
  list->next->prev = item;
  item->next = list->next;
  item->prev = list;
  list->next = item;
}
/*
* Zero segment
*/
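// Zeroes the byte ranges [start1, end1) and [start2, end2) of |page|'s data.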
static inline void zero_user_segments(Page *page, unsigned start1, unsigned end1, unsigned start2,
                                      unsigned end2) {
  char *data = (char *)page_address(page);
  ZX_ASSERT(end1 <= PAGE_SIZE && end2 <= PAGE_SIZE);
  if (end1 > start1)
    memset(data + start1, 0, end1 - start1);
  if (end2 > start2)
    memset(data + start2, 0, end2 - start2);
}
static inline void zero_user_segment(Page *page, unsigned start, unsigned end) {
  zero_user_segments(page, start, end, 0, 0);
}
static inline void zero_user(Page *page, unsigned start, unsigned size) {
  zero_user_segments(page, start, start + size, 0, 0);
}
/*
* Inode
*/
static inline void *igrab(void *vnode) {
  // TODO: need to add a ref count if vnode is valid
  return vnode;
}
static inline void *iput(void *vnode) {
  // TODO: need to decrement the ref count
  // TODO: handle vnode according to its validity when ref == 0
  return vnode;
}
static inline int is_bad_inode(void *vnode) {
  // TODO: IMPL after concluding how to handle read errors for vnode
  return false;
}
static inline void clear_inode(void *vnode) {
  // TODO: IMPL according to Fuchsia vnode flags and state transition (e.g., I_FREEING | I_CLEAR)
}
static inline void unlock_new_inode(void *vnode) {
  // TODO: IMPL according to Fuchsia vnode flags and state transition (e.g., I_NEW)
}
/*
* Misc
* TODO: need to be replaced with Fuchsia methods
*/
#define MS_POSIXACL (1 << 16) /* VFS does not apply the umask */
#define BUG_ON(a)
#define READ 0x0
#define WRITE 0x1
#define FLUSH 0x2
#define FUA 0x4
#define DISCARD 0x08
#define SYNC 0x10
#define READ_SYNC (READ | SYNC)
#define WRITE_SYNC (WRITE | SYNC)
#define WRITE_FLUSH_FUA (WRITE | SYNC | FLUSH | FUA)
#define I_DIRTY_SYNC (1 << 0)
#define I_DIRTY_DATASYNC (1 << 1)
#define I_DIRTY_PAGES (1 << 2)
#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
#define current_fsuid() (getuid())
#define current_fsgid() (getgid())
static inline uint64_t div_u64(uint64_t dividend, uint32_t divisor) {
  return dividend / ((uint64_t)divisor);
}
} // namespace f2fs
#endif  // F2FS_LIB_H_