f2fs: shrink extent_cache entries

This patch registers shrinking extent_caches.

Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
This commit is contained in:
Jaegeuk Kim 2015-06-19 13:41:23 -07:00
parent 1b38dc8e74
commit 554df79e52
4 changed files with 27 additions and 11 deletions

View file

@@ -767,7 +767,7 @@ void f2fs_preserve_extent_tree(struct inode *inode)
update_inode_page(inode); update_inode_page(inode);
} }
void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink) unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{ {
struct extent_tree *treevec[EXT_TREE_VEC_SIZE]; struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
struct extent_node *en, *tmp; struct extent_node *en, *tmp;
@@ -778,10 +778,7 @@ void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
unsigned int node_cnt = 0, tree_cnt = 0; unsigned int node_cnt = 0, tree_cnt = 0;
if (!test_opt(sbi, EXTENT_CACHE)) if (!test_opt(sbi, EXTENT_CACHE))
return; return 0;
if (available_free_memory(sbi, EXTENT_CACHE))
return;
spin_lock(&sbi->extent_lock); spin_lock(&sbi->extent_lock);
list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) { list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
@@ -791,7 +788,9 @@ void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
} }
spin_unlock(&sbi->extent_lock); spin_unlock(&sbi->extent_lock);
down_read(&sbi->extent_tree_lock); if (!down_read_trylock(&sbi->extent_tree_lock))
goto out;
while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root, while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
(void **)treevec, ino, EXT_TREE_VEC_SIZE))) { (void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
unsigned i; unsigned i;
@@ -809,7 +808,9 @@ void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
} }
up_read(&sbi->extent_tree_lock); up_read(&sbi->extent_tree_lock);
down_write(&sbi->extent_tree_lock); if (!down_write_trylock(&sbi->extent_tree_lock))
goto out;
radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter, radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
F2FS_ROOT_INO(sbi)) { F2FS_ROOT_INO(sbi)) {
struct extent_tree *et = (struct extent_tree *)*slot; struct extent_tree *et = (struct extent_tree *)*slot;
@@ -822,8 +823,10 @@ void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
} }
} }
up_write(&sbi->extent_tree_lock); up_write(&sbi->extent_tree_lock);
out:
trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt); trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
return node_cnt + tree_cnt;
} }
void f2fs_destroy_extent_tree(struct inode *inode) void f2fs_destroy_extent_tree(struct inode *inode)

View file

@@ -1754,7 +1754,7 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *);
void set_data_blkaddr(struct dnode_of_data *); void set_data_blkaddr(struct dnode_of_data *);
int reserve_new_block(struct dnode_of_data *); int reserve_new_block(struct dnode_of_data *);
int f2fs_reserve_block(struct dnode_of_data *, pgoff_t); int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
void f2fs_shrink_extent_tree(struct f2fs_sb_info *, int); unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
void f2fs_destroy_extent_tree(struct inode *); void f2fs_destroy_extent_tree(struct inode *);
void f2fs_init_extent_cache(struct inode *, struct f2fs_extent *); void f2fs_init_extent_cache(struct inode *, struct f2fs_extent *);
void f2fs_update_extent_cache(struct dnode_of_data *); void f2fs_update_extent_cache(struct dnode_of_data *);

View file

@@ -304,7 +304,8 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{ {
/* try to shrink extent cache when there is no enough memory */ /* try to shrink extent cache when there is no enough memory */
f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER); if (!available_free_memory(sbi, EXTENT_CACHE))
f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
/* check the # of cached NAT entries */ /* check the # of cached NAT entries */
if (!available_free_memory(sbi, NAT_ENTRIES)) if (!available_free_memory(sbi, NAT_ENTRIES))

View file

@@ -23,6 +23,11 @@ static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt; return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
} }
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
}
unsigned long f2fs_shrink_count(struct shrinker *shrink, unsigned long f2fs_shrink_count(struct shrinker *shrink,
struct shrink_control *sc) struct shrink_control *sc)
{ {
@@ -42,6 +47,9 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
} }
spin_unlock(&f2fs_list_lock); spin_unlock(&f2fs_list_lock);
/* count extent cache entries */
count += __count_extent_cache(sbi);
/* shrink clean nat cache entries */ /* shrink clean nat cache entries */
count += __count_nat_entries(sbi); count += __count_nat_entries(sbi);
@@ -82,8 +90,12 @@ unsigned long f2fs_shrink_scan(struct shrinker *shrink,
sbi->shrinker_run_no = run_no; sbi->shrinker_run_no = run_no;
/* shrink extent cache entries */
freed += f2fs_shrink_extent_tree(sbi, nr >> 1);
/* shrink clean nat cache entries */ /* shrink clean nat cache entries */
freed += try_to_free_nats(sbi, nr); if (freed < nr)
freed += try_to_free_nats(sbi, nr - freed);
spin_lock(&f2fs_list_lock); spin_lock(&f2fs_list_lock);
p = p->next; p = p->next;