f2fs: merge flags in struct f2fs_sb_info

Currently, there are several status variables of Boolean type, as shown below:

struct f2fs_sb_info {
...
	int s_dirty;
	bool need_fsck;
	bool s_closing;
...
	bool por_doing;
...
}

This layout has some issues:
1. some space in f2fs_sb_info is wasted, because the compiler inserts padding
   after the Boolean type variables for alignment.
2. if we keep adding new flags to f2fs_sb_info one by one, the structure will
   become messy.

So in this patch, we:
1. treat s_dirty as a Boolean flag, since it only ever holds the two states 0/1.
2. merge the s_dirty/need_fsck/s_closing/por_doing variables into a single s_flag field.
3. introduce an enum type that names the different states of sbi.
4. use the newly introduced universal interfaces is_sbi_flag_set/{set,clear}_sbi_flag
   to operate on the sbi flags (a self-contained sketch of this pattern follows below).

After that, the above issues are fixed.
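For illustration only, here is a minimal, self-contained user-space sketch of the
flag scheme used by this patch. The enum values mirror the ones added to f2fs.h,
but struct sb_info, main() and the asserts are made up for the example and are
not part of the patch:

#include <assert.h>
#include <stdbool.h>

/* one bit index per state, mirroring the new SBI_* enum */
enum {
	SBI_IS_DIRTY,	/* dirty flag for checkpoint */
	SBI_IS_CLOSE,	/* specify unmounting */
	SBI_NEED_FSCK,	/* need fsck.f2fs to fix */
	SBI_POR_DOING,	/* recovery is doing or not */
};

struct sb_info {
	int s_flag;	/* replaces the old separate bool/int fields */
};

static bool is_sbi_flag_set(struct sb_info *sbi, unsigned int type)
{
	return sbi->s_flag & (0x01 << type);
}

static void set_sbi_flag(struct sb_info *sbi, unsigned int type)
{
	sbi->s_flag |= (0x01 << type);
}

static void clear_sbi_flag(struct sb_info *sbi, unsigned int type)
{
	sbi->s_flag &= ~(0x01 << type);
}

int main(void)
{
	struct sb_info sbi = { .s_flag = 0 };

	/* equivalent of the old "sbi->por_doing = true" / "= false" */
	set_sbi_flag(&sbi, SBI_POR_DOING);
	assert(is_sbi_flag_set(&sbi, SBI_POR_DOING));
	clear_sbi_flag(&sbi, SBI_POR_DOING);
	assert(!is_sbi_flag_set(&sbi, SBI_POR_DOING));

	/* each state is an independent bit of s_flag */
	set_sbi_flag(&sbi, SBI_NEED_FSCK);
	assert(!is_sbi_flag_set(&sbi, SBI_IS_DIRTY));
	return 0;
}

With this layout, adding a future state only costs one new enum entry and one bit
of s_flag, instead of another padded member in struct f2fs_sb_info.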

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Author: Chao Yu, 2015-01-28 17:48:42 +08:00 (committed by Jaegeuk Kim)
Commit: caf0047e7e (parent 88dd893419)
8 changed files with 46 additions and 35 deletions

diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c

@@ -190,7 +190,7 @@ static int f2fs_write_meta_page(struct page *page,
 	trace_f2fs_writepage(page, META);
-	if (unlikely(sbi->por_doing))
+	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
 	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
 		goto redirty_out;
@@ -485,7 +485,7 @@ void recover_orphan_inodes(struct f2fs_sb_info *sbi)
 	if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
 		return;
-	sbi->por_doing = true;
+	set_sbi_flag(sbi, SBI_POR_DOING);
 	start_blk = __start_cp_addr(sbi) + 1 +
 		le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
@@ -506,7 +506,7 @@ void recover_orphan_inodes(struct f2fs_sb_info *sbi)
 	}
 	/* clear Orphan Flag */
 	clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
-	sbi->por_doing = false;
+	clear_sbi_flag(sbi, SBI_POR_DOING);
 	return;
 }
@@ -973,7 +973,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	else
 		clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
-	if (sbi->need_fsck)
+	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
 		set_ckpt_flags(ckpt, CP_FSCK_FLAG);
 	/* update SIT/NAT bitmap */
@@ -1047,7 +1047,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 		return;
 	clear_prefree_segments(sbi);
-	F2FS_RESET_SB_DIRT(sbi);
+	clear_sbi_flag(sbi, SBI_IS_DIRTY);
 }
 /*
@@ -1062,7 +1062,7 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	mutex_lock(&sbi->cp_mutex);
-	if (!sbi->s_dirty &&
+	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
 		cpc->reason != CP_DISCARD && cpc->reason != CP_UMOUNT)
 		goto out;
 	if (unlikely(f2fs_cp_error(sbi)))

diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c

@@ -818,7 +818,7 @@ static int f2fs_write_data_page(struct page *page,
 		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 write:
-	if (unlikely(sbi->por_doing))
+	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
 	if (f2fs_is_drop_cache(inode))
 		goto out;

diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h

@@ -28,7 +28,7 @@
 	do { \
 		if (unlikely(condition)) { \
 			WARN_ON(1); \
-			sbi->need_fsck = true; \
+			set_sbi_flag(sbi, SBI_NEED_FSCK); \
 		} \
 	} while (0)
 #define f2fs_down_write(x, y)	down_write(x)
@@ -519,14 +519,20 @@ struct inode_management {
 	unsigned long ino_num;			/* number of entries */
 };
+/* For s_flag in struct f2fs_sb_info */
+enum {
+	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
+	SBI_IS_CLOSE,				/* specify unmounting */
+	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
+	SBI_POR_DOING,				/* recovery is doing or not */
+};
 struct f2fs_sb_info {
 	struct super_block *sb;			/* pointer to VFS super block */
 	struct proc_dir_entry *s_proc;		/* proc entry */
 	struct buffer_head *raw_super_buf;	/* buffer head of raw sb */
 	struct f2fs_super_block *raw_super;	/* raw super block pointer */
-	int s_dirty;				/* dirty flag for checkpoint */
-	bool need_fsck;				/* need fsck.f2fs to fix */
-	bool s_closing;				/* specify unmounting */
+	int s_flag;				/* flags for sbi */
 	/* for node-related operations */
 	struct f2fs_nm_info *nm_info;		/* node manager */
@@ -546,7 +552,6 @@ struct f2fs_sb_info {
 	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
 	struct rw_semaphore node_write;		/* locking node writes */
 	struct mutex writepages;		/* mutex for writepages() */
-	bool por_doing;				/* recovery is doing or not */
 	wait_queue_head_t cp_wait;
 	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */
@@ -699,14 +704,19 @@ static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
 	return sbi->node_inode->i_mapping;
 }
-static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
+static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
 {
-	sbi->s_dirty = 1;
+	return sbi->s_flag & (0x01 << type);
 }
-static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
+static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
 {
-	sbi->s_dirty = 0;
+	sbi->s_flag |= (0x01 << type);
 }
+static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
+{
+	sbi->s_flag &= ~(0x01 << type);
+}
 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
@@ -818,7 +828,7 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
 {
 	atomic_inc(&sbi->nr_pages[count_type]);
-	F2FS_SET_SB_DIRT(sbi);
+	set_sbi_flag(sbi, SBI_IS_DIRTY);
 }
 static inline void inode_inc_dirty_pages(struct inode *inode)

diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c

@@ -588,7 +588,7 @@ static void truncate_node(struct dnode_of_data *dn)
 	}
 invalidate:
 	clear_node_page_dirty(dn->node_page);
-	F2FS_SET_SB_DIRT(sbi);
+	set_sbi_flag(sbi, SBI_IS_DIRTY);
 	f2fs_put_page(dn->node_page, 1);
@@ -1284,7 +1284,7 @@ static int f2fs_write_node_page(struct page *page,
 	trace_f2fs_writepage(page, NODE);
-	if (unlikely(sbi->por_doing))
+	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
 	if (unlikely(f2fs_cp_error(sbi)))
 		goto redirty_out;

diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c

@@ -508,7 +508,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
 	INIT_LIST_HEAD(&inode_list);
 	/* step #1: find fsynced inode numbers */
-	sbi->por_doing = true;
+	set_sbi_flag(sbi, SBI_POR_DOING);
 	/* prevent checkpoint */
 	mutex_lock(&sbi->cp_mutex);
@@ -541,7 +541,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
 		truncate_inode_pages_final(META_MAPPING(sbi));
 	}
-	sbi->por_doing = false;
+	clear_sbi_flag(sbi, SBI_POR_DOING);
 	if (err) {
 		discard_next_dnode(sbi, blkaddr);

diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h

@@ -460,7 +460,7 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
-	if (unlikely(sbi->por_doing))
+	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		return false;
 	return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
@@ -599,13 +599,13 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
 static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
 {
 	if (segno > TOTAL_SEGS(sbi) - 1)
-		sbi->need_fsck = true;
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
 }
 static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
 {
 	if (blk_addr < SEG0_BLKADDR(sbi) || blk_addr >= MAX_BLKADDR(sbi))
-		sbi->need_fsck = true;
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
 }
 /*
@@ -616,11 +616,11 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
 {
 	/* check segment usage */
 	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
-		sbi->need_fsck = true;
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
 	/* check boundary of a given segment number */
 	if (segno > TOTAL_SEGS(sbi) - 1)
-		sbi->need_fsck = true;
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
 }
 #endif

diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c

@@ -452,7 +452,7 @@ static void f2fs_put_super(struct super_block *sb)
 	 * But, the previous checkpoint was not done by umount, it needs to do
 	 * clean checkpoint again.
 	 */
-	if (sbi->s_dirty ||
+	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
 			!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
 		struct cp_control cpc = {
 			.reason = CP_UMOUNT,
@@ -492,8 +492,9 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
 	if (sync) {
 		struct cp_control cpc;
-		cpc.reason = (test_opt(sbi, FASTBOOT) || sbi->s_closing) ?
-						CP_UMOUNT : CP_SYNC;
+		cpc.reason = (test_opt(sbi, FASTBOOT) ||
+				is_sbi_flag_set(sbi, SBI_IS_CLOSE)) ?
+						CP_UMOUNT : CP_SYNC;
 		mutex_lock(&sbi->gc_mutex);
 		write_checkpoint(sbi, &cpc);
 		mutex_unlock(&sbi->gc_mutex);
@@ -895,7 +896,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
 		atomic_set(&sbi->nr_pages[i], 0);
 	sbi->dir_level = DEF_DIR_LEVEL;
-	sbi->need_fsck = false;
+	clear_sbi_flag(sbi, SBI_NEED_FSCK);
 }
 /*
@@ -1006,7 +1007,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 	mutex_init(&sbi->writepages);
 	mutex_init(&sbi->cp_mutex);
 	init_rwsem(&sbi->node_write);
-	sbi->por_doing = false;
+	clear_sbi_flag(sbi, SBI_POR_DOING);
 	spin_lock_init(&sbi->stat_lock);
 	init_rwsem(&sbi->read_io.io_rwsem);
@@ -1130,7 +1131,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 			goto free_proc;
 	if (!retry)
-		sbi->need_fsck = true;
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
 	/* recover fsynced data */
 	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
@@ -1199,7 +1200,7 @@ static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
 static void kill_f2fs_super(struct super_block *sb)
 {
 	if (sb->s_root)
-		F2FS_SB(sb)->s_closing = true;
+		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
 	kill_block_super(sb);
 }

diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h

@@ -184,13 +184,13 @@ TRACE_EVENT(f2fs_sync_fs,
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
-		__field(int, dirty)
+		__field(bool, dirty)
 		__field(int, wait)
 	),
 	TP_fast_assign(
 		__entry->dev = sb->s_dev;
-		__entry->dirty = F2FS_SB(sb)->s_dirty;
+		__entry->dirty = is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY);
 		__entry->wait = wait;
 	),