f2fs: introduce dirty list node in inode info

Add a new dirty list node member in inode info for linking the inode to
the global dirty list in the superblock, instead of the old implementation,
which allocated slab cache memory as an entry for the inode.

It avoids memory pressure due to slab cache allocation, and also makes
the code cleaner.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
This commit is contained in:
Chao Yu 2015-12-15 13:30:45 +08:00 committed by Jaegeuk Kim
parent a49324f127
commit 2710fd7e00
4 changed files with 20 additions and 46 deletions

View file

@ -722,25 +722,23 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
return -EINVAL; return -EINVAL;
} }
static int __add_dirty_inode(struct inode *inode, struct inode_entry *new) static void __add_dirty_inode(struct inode *inode)
{ {
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
if (is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR)) if (is_inode_flag_set(fi, FI_DIRTY_DIR))
return -EEXIST; return;
set_inode_flag(F2FS_I(inode), FI_DIRTY_DIR); set_inode_flag(fi, FI_DIRTY_DIR);
F2FS_I(inode)->dirty_dir = new; list_add_tail(&fi->dirty_list, &sbi->dir_inode_list);
list_add_tail(&new->list, &sbi->dir_inode_list);
stat_inc_dirty_dir(sbi); stat_inc_dirty_dir(sbi);
return 0; return;
} }
void update_dirty_page(struct inode *inode, struct page *page) void update_dirty_page(struct inode *inode, struct page *page)
{ {
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct inode_entry *new;
int ret = 0;
if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
!S_ISLNK(inode->i_mode)) !S_ISLNK(inode->i_mode))
@ -751,17 +749,11 @@ void update_dirty_page(struct inode *inode, struct page *page)
goto out; goto out;
} }
new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
new->inode = inode;
INIT_LIST_HEAD(&new->list);
spin_lock(&sbi->dir_inode_lock); spin_lock(&sbi->dir_inode_lock);
ret = __add_dirty_inode(inode, new); __add_dirty_inode(inode);
inode_inc_dirty_pages(inode); inode_inc_dirty_pages(inode);
spin_unlock(&sbi->dir_inode_lock); spin_unlock(&sbi->dir_inode_lock);
if (ret)
kmem_cache_free(inode_entry_slab, new);
out: out:
SetPagePrivate(page); SetPagePrivate(page);
f2fs_trace_pid(page); f2fs_trace_pid(page);
@ -770,25 +762,16 @@ void update_dirty_page(struct inode *inode, struct page *page)
void add_dirty_dir_inode(struct inode *inode) void add_dirty_dir_inode(struct inode *inode)
{ {
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct inode_entry *new =
f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
int ret = 0;
new->inode = inode;
INIT_LIST_HEAD(&new->list);
spin_lock(&sbi->dir_inode_lock); spin_lock(&sbi->dir_inode_lock);
ret = __add_dirty_inode(inode, new); __add_dirty_inode(inode);
spin_unlock(&sbi->dir_inode_lock); spin_unlock(&sbi->dir_inode_lock);
if (ret)
kmem_cache_free(inode_entry_slab, new);
} }
void remove_dirty_dir_inode(struct inode *inode) void remove_dirty_dir_inode(struct inode *inode)
{ {
struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct inode_entry *entry; struct f2fs_inode_info *fi = F2FS_I(inode);
if (!S_ISDIR(inode->i_mode)) if (!S_ISDIR(inode->i_mode))
return; return;
@ -800,17 +783,14 @@ void remove_dirty_dir_inode(struct inode *inode)
return; return;
} }
entry = F2FS_I(inode)->dirty_dir; list_del_init(&fi->dirty_list);
list_del(&entry->list); clear_inode_flag(fi, FI_DIRTY_DIR);
F2FS_I(inode)->dirty_dir = NULL;
clear_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
stat_dec_dirty_dir(sbi); stat_dec_dirty_dir(sbi);
spin_unlock(&sbi->dir_inode_lock); spin_unlock(&sbi->dir_inode_lock);
kmem_cache_free(inode_entry_slab, entry);
/* Only from the recovery routine */ /* Only from the recovery routine */
if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) { if (is_inode_flag_set(fi, FI_DELAY_IPUT)) {
clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT); clear_inode_flag(fi, FI_DELAY_IPUT);
iput(inode); iput(inode);
} }
} }
@ -818,8 +798,8 @@ void remove_dirty_dir_inode(struct inode *inode)
void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi) void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
{ {
struct list_head *head; struct list_head *head;
struct inode_entry *entry;
struct inode *inode; struct inode *inode;
struct f2fs_inode_info *fi;
retry: retry:
if (unlikely(f2fs_cp_error(sbi))) if (unlikely(f2fs_cp_error(sbi)))
return; return;
@ -831,8 +811,8 @@ void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
spin_unlock(&sbi->dir_inode_lock); spin_unlock(&sbi->dir_inode_lock);
return; return;
} }
entry = list_entry(head->next, struct inode_entry, list); fi = list_entry(head->next, struct f2fs_inode_info, dirty_list);
inode = igrab(entry->inode); inode = igrab(&fi->vfs_inode);
spin_unlock(&sbi->dir_inode_lock); spin_unlock(&sbi->dir_inode_lock);
if (inode) { if (inode) {
filemap_fdatawrite(inode->i_mapping); filemap_fdatawrite(inode->i_mapping);

View file

@ -189,7 +189,6 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->cache_mem += NM_I(sbi)->dirty_nat_cnt * si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
sizeof(struct nat_entry_set); sizeof(struct nat_entry_set);
si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages); si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
si->cache_mem += sbi->n_dirty_dirs * sizeof(struct inode_entry);
for (i = 0; i <= UPDATE_INO; i++) for (i = 0; i <= UPDATE_INO; i++)
si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry); si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
si->cache_mem += sbi->total_ext_tree * sizeof(struct extent_tree); si->cache_mem += sbi->total_ext_tree * sizeof(struct extent_tree);

View file

@ -158,13 +158,7 @@ struct ino_entry {
nid_t ino; /* inode number */ nid_t ino; /* inode number */
}; };
/* /* for the list of inodes to be GCed */
* for the list of directory inodes or gc inodes.
* NOTE: there are two slab users for this structure, if we add/modify/delete
* fields in structure for one of slab users, it may affect fields or size of
* other one, in this condition, it's better to split both of slab and related
* data structure.
*/
struct inode_entry { struct inode_entry {
struct list_head list; /* list head */ struct list_head list; /* list head */
struct inode *inode; /* vfs inode pointer */ struct inode *inode; /* vfs inode pointer */
@ -441,8 +435,8 @@ struct f2fs_inode_info {
unsigned int clevel; /* maximum level of given file name */ unsigned int clevel; /* maximum level of given file name */
nid_t i_xattr_nid; /* node id that contains xattrs */ nid_t i_xattr_nid; /* node id that contains xattrs */
unsigned long long xattr_ver; /* cp version of xattr modification */ unsigned long long xattr_ver; /* cp version of xattr modification */
struct inode_entry *dirty_dir; /* the pointer of dirty dir */
struct list_head dirty_list; /* linked in global dirty list */
struct list_head inmem_pages; /* inmemory pages managed by f2fs */ struct list_head inmem_pages; /* inmemory pages managed by f2fs */
struct mutex inmem_lock; /* lock for inmemory pages */ struct mutex inmem_lock; /* lock for inmemory pages */

View file

@ -432,6 +432,7 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
fi->i_current_depth = 1; fi->i_current_depth = 1;
fi->i_advise = 0; fi->i_advise = 0;
init_rwsem(&fi->i_sem); init_rwsem(&fi->i_sem);
INIT_LIST_HEAD(&fi->dirty_list);
INIT_LIST_HEAD(&fi->inmem_pages); INIT_LIST_HEAD(&fi->inmem_pages);
mutex_init(&fi->inmem_lock); mutex_init(&fi->inmem_lock);