diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 7dd63b794bfb..a05eb35a372c 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -328,11 +328,11 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	int nr = nr_shrink;
 
-	if (available_free_memory(sbi, NAT_ENTRIES))
+	if (!down_write_trylock(&nm_i->nat_tree_lock))
 		return 0;
 
-	down_write(&nm_i->nat_tree_lock);
 	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
 		struct nat_entry *ne;
 		ne = list_first_entry(&nm_i->nat_entries,
@@ -341,7 +341,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 		nr_shrink--;
 	}
 	up_write(&nm_i->nat_tree_lock);
-	return nr_shrink;
+	return nr - nr_shrink;
 }
 
 /*
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 61b97f9cb9f6..d5ee99258cbc 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -306,8 +306,12 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 	/* try to shrink extent cache when there is no enough memory */
 	f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
 
-	/* check the # of cached NAT entries and prefree segments */
-	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
+	/* check the # of cached NAT entries */
+	if (!available_free_memory(sbi, NAT_ENTRIES))
+		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
+
+	/* checkpoint is the only way to shrink partial cached entries */
+	if (!available_free_memory(sbi, NAT_ENTRIES) ||
 			excess_prefree_segs(sbi) ||
 			!available_free_memory(sbi, INO_ENTRIES))
 		f2fs_sync_fs(sbi->sb, true);
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index 16e9b43635c2..c4bd6ee5936c 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -18,6 +18,11 @@ static LIST_HEAD(f2fs_list);
 static DEFINE_SPINLOCK(f2fs_list_lock);
 static unsigned int shrinker_run_no;
 
+static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
+{
+	return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+}
+
 unsigned long f2fs_shrink_count(struct shrinker *shrink,
 				struct shrink_control *sc)
 {
@@ -37,7 +42,8 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
 		}
 		spin_unlock(&f2fs_list_lock);
 
-		/* TODO: count # of objects */
+		/* shrink clean nat cache entries */
+		count += __count_nat_entries(sbi);
 
 		spin_lock(&f2fs_list_lock);
 		p = p->next;
@@ -76,7 +82,8 @@ unsigned long f2fs_shrink_scan(struct shrinker *shrink,
 
 		sbi->shrinker_run_no = run_no;
 
-		/* TODO: shrink caches */
+		/* shrink clean nat cache entries */
+		freed += try_to_free_nats(sbi, nr);
 
 		spin_lock(&f2fs_list_lock);
 		p = p->next;
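
Note (not part of the patch itself): the f2fs_shrink_count()/f2fs_shrink_scan() callbacks touched above only run once they are wired into a struct shrinker and registered with the VM. Below is a minimal sketch of that wiring, assuming the registration sits in fs/f2fs/super.c as in mainline f2fs; the init_f2fs_fs() body is abbreviated and its error handling elided, so treat it as an illustration rather than part of this diff.

/* sketch: hook the count/scan callbacks into the memory shrinker */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,	/* frees clean NAT entries via try_to_free_nats() */
	.count_objects = f2fs_shrink_count,	/* reports nat_cnt - dirty_nat_cnt per sbi */
	.seeks = DEFAULT_SEEKS,
};

static int __init init_f2fs_fs(void)
{
	int err;

	/* ... earlier filesystem init elided ... */
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		return err;
	/* ... remaining init elided ... */
	return 0;
}

With this in place, memory pressure first invokes f2fs_shrink_count() to learn how many clean NAT entries are reclaimable across the mounted instances on f2fs_list, and then f2fs_shrink_scan(), which walks that list and frees up to the requested number of entries through try_to_free_nats().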