f2fs: move f2fs to use reader-unfair rwsems

f2fs rw_semaphores work better if writers can starve readers,
especially for the checkpoint thread, because writers are strictly
more important than reader threads. This prevents a significant
priority inversion in which low-priority readers that blocked while
trying to acquire the read lock delay a second acquisition of the
write lock that might be blocking high-priority work.
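
In outline, the scheme this patch adds wraps a normal rw_semaphore so
that readers only ever take the lock with a trylock and otherwise sleep
on a plain waitqueue, while writers use the underlying rwsem directly.
The following is a condensed sketch of the wrappers introduced in
fs/f2fs/f2fs.h below (not the complete set):

  struct f2fs_rwsem {
  	struct rw_semaphore internal_rwsem;
  	wait_queue_head_t read_waiters;
  };

  static inline void f2fs_down_read(struct f2fs_rwsem *sem)
  {
  	/* readers never join the rwsem's FIFO wait list; they retry */
  	wait_event(sem->read_waiters,
  		   down_read_trylock(&sem->internal_rwsem));
  }

  static inline void f2fs_down_write(struct f2fs_rwsem *sem)
  {
  	/* writers queue on the underlying rwsem as usual */
  	down_write(&sem->internal_rwsem);
  }

  static inline void f2fs_up_write(struct f2fs_rwsem *sem)
  {
  	up_write(&sem->internal_rwsem);
  	/* only now may sleeping readers retry their trylock */
  	wake_up_all(&sem->read_waiters);
  }

A writer that arrives while readers are sleeping in wait_event() wins
the lock next, because the readers' down_read_trylock() keeps failing
until the writer releases the semaphore and wakes them.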

Signed-off-by: Tim Murray <timmurray@google.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
commit e4544b63a7
parent dd81e1c7d5
Author: Tim Murray, 2022-01-07 12:48:44 -08:00; committed by Jaegeuk Kim
16 changed files with 342 additions and 274 deletions

fs/f2fs/checkpoint.c

@@ -351,13 +351,13 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
 goto skip_write;
 /* if locked failed, cp will flush dirty pages instead */
-if (!down_write_trylock(&sbi->cp_global_sem))
+if (!f2fs_down_write_trylock(&sbi->cp_global_sem))
 goto skip_write;
 trace_f2fs_writepages(mapping->host, wbc, META);
 diff = nr_pages_to_write(sbi, META, wbc);
 written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
-up_write(&sbi->cp_global_sem);
+f2fs_up_write(&sbi->cp_global_sem);
 wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
 return 0;
@@ -1159,7 +1159,7 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
 if (!is_journalled_quota(sbi))
 return false;
-if (!down_write_trylock(&sbi->quota_sem))
+if (!f2fs_down_write_trylock(&sbi->quota_sem))
 return true;
 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
 ret = false;
@@ -1171,7 +1171,7 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
 } else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
 ret = true;
 }
-up_write(&sbi->quota_sem);
+f2fs_up_write(&sbi->quota_sem);
 return ret;
 }
@@ -1228,10 +1228,10 @@ static int block_operations(struct f2fs_sb_info *sbi)
 * POR: we should ensure that there are no dirty node pages
 * until finishing nat/sit flush. inode->i_blocks can be updated.
 */
-down_write(&sbi->node_change);
+f2fs_down_write(&sbi->node_change);
 if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
-up_write(&sbi->node_change);
+f2fs_up_write(&sbi->node_change);
 f2fs_unlock_all(sbi);
 err = f2fs_sync_inode_meta(sbi);
 if (err)
@@ -1241,15 +1241,15 @@ static int block_operations(struct f2fs_sb_info *sbi)
 }
 retry_flush_nodes:
-down_write(&sbi->node_write);
+f2fs_down_write(&sbi->node_write);
 if (get_pages(sbi, F2FS_DIRTY_NODES)) {
-up_write(&sbi->node_write);
+f2fs_up_write(&sbi->node_write);
 atomic_inc(&sbi->wb_sync_req[NODE]);
 err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
 atomic_dec(&sbi->wb_sync_req[NODE]);
 if (err) {
-up_write(&sbi->node_change);
+f2fs_up_write(&sbi->node_change);
 f2fs_unlock_all(sbi);
 return err;
 }
@@ -1262,13 +1262,13 @@ static int block_operations(struct f2fs_sb_info *sbi)
 * dirty node blocks and some checkpoint values by block allocation.
 */
 __prepare_cp_block(sbi);
-up_write(&sbi->node_change);
+f2fs_up_write(&sbi->node_change);
 return err;
 }
 static void unblock_operations(struct f2fs_sb_info *sbi)
 {
-up_write(&sbi->node_write);
+f2fs_up_write(&sbi->node_write);
 f2fs_unlock_all(sbi);
 }
@@ -1612,7 +1612,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 f2fs_warn(sbi, "Start checkpoint disabled!");
 }
 if (cpc->reason != CP_RESIZE)
-down_write(&sbi->cp_global_sem);
+f2fs_down_write(&sbi->cp_global_sem);
 if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
 ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
@@ -1693,7 +1693,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
 out:
 if (cpc->reason != CP_RESIZE)
-up_write(&sbi->cp_global_sem);
+f2fs_up_write(&sbi->cp_global_sem);
 return err;
 }
@@ -1741,9 +1741,9 @@ static int __write_checkpoint_sync(struct f2fs_sb_info *sbi)
 struct cp_control cpc = { .reason = CP_SYNC, };
 int err;
-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
 err = f2fs_write_checkpoint(sbi, &cpc);
-up_write(&sbi->gc_lock);
+f2fs_up_write(&sbi->gc_lock);
 return err;
 }
@@ -1831,9 +1831,9 @@ int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
 if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
 int ret;
-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
 ret = f2fs_write_checkpoint(sbi, &cpc);
-up_write(&sbi->gc_lock);
+f2fs_up_write(&sbi->gc_lock);
 return ret;
 }

fs/f2fs/compress.c

@@ -1267,7 +1267,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 * checkpoint. This can only happen to quota writes which can cause
 * the below discard race condition.
 */
-down_read(&sbi->node_write);
+f2fs_down_read(&sbi->node_write);
 } else if (!f2fs_trylock_op(sbi)) {
 goto out_free;
 }
@@ -1384,7 +1384,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 f2fs_put_dnode(&dn);
 if (IS_NOQUOTA(inode))
-up_read(&sbi->node_write);
+f2fs_up_read(&sbi->node_write);
 else
 f2fs_unlock_op(sbi);
@@ -1410,7 +1410,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 f2fs_put_dnode(&dn);
 out_unlock_op:
 if (IS_NOQUOTA(inode))
-up_read(&sbi->node_write);
+f2fs_up_read(&sbi->node_write);
 else
 f2fs_unlock_op(sbi);
 out_free:

fs/f2fs/data.c

@@ -590,7 +590,7 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
 enum page_type btype = PAGE_TYPE_OF_BIO(type);
 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
-down_write(&io->io_rwsem);
+f2fs_down_write(&io->io_rwsem);
 /* change META to META_FLUSH in the checkpoint procedure */
 if (type >= META_FLUSH) {
@@ -601,7 +601,7 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
 io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
 }
 __submit_merged_bio(io);
-up_write(&io->io_rwsem);
+f2fs_up_write(&io->io_rwsem);
 }
 static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
@@ -616,9 +616,9 @@ static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
 enum page_type btype = PAGE_TYPE_OF_BIO(type);
 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
-down_read(&io->io_rwsem);
+f2fs_down_read(&io->io_rwsem);
 ret = __has_merged_page(io->bio, inode, page, ino);
-up_read(&io->io_rwsem);
+f2fs_up_read(&io->io_rwsem);
 }
 if (ret)
 __f2fs_submit_merged_write(sbi, type, temp);
@@ -742,9 +742,9 @@ static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
 f2fs_bug_on(sbi, 1);
-down_write(&io->bio_list_lock);
+f2fs_down_write(&io->bio_list_lock);
 list_add_tail(&be->list, &io->bio_list);
-up_write(&io->bio_list_lock);
+f2fs_up_write(&io->bio_list_lock);
 }
 static void del_bio_entry(struct bio_entry *be)
@@ -766,7 +766,7 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
 struct list_head *head = &io->bio_list;
 struct bio_entry *be;
-down_write(&io->bio_list_lock);
+f2fs_down_write(&io->bio_list_lock);
 list_for_each_entry(be, head, list) {
 if (be->bio != *bio)
 continue;
@@ -790,7 +790,7 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
 __submit_bio(sbi, *bio, DATA);
 break;
 }
-up_write(&io->bio_list_lock);
+f2fs_up_write(&io->bio_list_lock);
 }
 if (ret) {
@@ -816,7 +816,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
 if (list_empty(head))
 continue;
-down_read(&io->bio_list_lock);
+f2fs_down_read(&io->bio_list_lock);
 list_for_each_entry(be, head, list) {
 if (target)
 found = (target == be->bio);
@@ -826,14 +826,14 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
 if (found)
 break;
 }
-up_read(&io->bio_list_lock);
+f2fs_up_read(&io->bio_list_lock);
 if (!found)
 continue;
 found = false;
-down_write(&io->bio_list_lock);
+f2fs_down_write(&io->bio_list_lock);
 list_for_each_entry(be, head, list) {
 if (target)
 found = (target == be->bio);
@@ -846,7 +846,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
 break;
 }
 }
-up_write(&io->bio_list_lock);
+f2fs_up_write(&io->bio_list_lock);
 }
 if (found)
@@ -906,7 +906,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
 f2fs_bug_on(sbi, is_read_io(fio->op));
-down_write(&io->io_rwsem);
+f2fs_down_write(&io->io_rwsem);
 next:
 if (fio->in_list) {
 spin_lock(&io->io_lock);
@@ -973,7 +973,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
 if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
 !f2fs_is_checkpoint_ready(sbi))
 __submit_merged_bio(io);
-up_write(&io->io_rwsem);
+f2fs_up_write(&io->io_rwsem);
 }
 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
@@ -1383,9 +1383,9 @@ void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
 {
 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
 if (lock)
-down_read(&sbi->node_change);
+f2fs_down_read(&sbi->node_change);
 else
-up_read(&sbi->node_change);
+f2fs_up_read(&sbi->node_change);
 } else {
 if (lock)
 f2fs_lock_op(sbi);
@@ -2749,13 +2749,13 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
 * the below discard race condition.
 */
 if (IS_NOQUOTA(inode))
-down_read(&sbi->node_write);
+f2fs_down_read(&sbi->node_write);
 fio.need_lock = LOCK_DONE;
 err = f2fs_do_write_data_page(&fio);
 if (IS_NOQUOTA(inode))
-up_read(&sbi->node_write);
+f2fs_up_read(&sbi->node_write);
 goto done;
 }
@@ -3213,14 +3213,14 @@ void f2fs_write_failed(struct inode *inode, loff_t to)
 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
 if (to > i_size && !f2fs_verity_in_progress(inode)) {
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 filemap_invalidate_lock(inode->i_mapping);
 truncate_pagecache(inode, i_size);
 f2fs_truncate_blocks(inode, i_size, true);
 filemap_invalidate_unlock(inode->i_mapping);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 }
 }
@@ -3721,13 +3721,13 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
 unsigned int end_sec = secidx + blkcnt / blk_per_sec;
 int ret = 0;
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 filemap_invalidate_lock(inode->i_mapping);
 set_inode_flag(inode, FI_ALIGNED_WRITE);
 for (; secidx < end_sec; secidx++) {
-down_write(&sbi->pin_sem);
+f2fs_down_write(&sbi->pin_sem);
 f2fs_lock_op(sbi);
 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
@@ -3741,7 +3741,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
 page = f2fs_get_lock_data_page(inode, blkidx, true);
 if (IS_ERR(page)) {
-up_write(&sbi->pin_sem);
+f2fs_up_write(&sbi->pin_sem);
 ret = PTR_ERR(page);
 goto done;
 }
@@ -3754,7 +3754,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
 ret = filemap_fdatawrite(inode->i_mapping);
-up_write(&sbi->pin_sem);
+f2fs_up_write(&sbi->pin_sem);
 if (ret)
 break;
@@ -3765,7 +3765,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
 clear_inode_flag(inode, FI_ALIGNED_WRITE);
 filemap_invalidate_unlock(inode->i_mapping);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 return ret;
 }

fs/f2fs/dir.c

@@ -766,7 +766,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
 f2fs_wait_on_page_writeback(dentry_page, DATA, true, true);
 if (inode) {
-down_write(&F2FS_I(inode)->i_sem);
+f2fs_down_write(&F2FS_I(inode)->i_sem);
 page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
 if (IS_ERR(page)) {
 err = PTR_ERR(page);
@@ -793,7 +793,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
 f2fs_update_parent_metadata(dir, inode, current_depth);
 fail:
 if (inode)
-up_write(&F2FS_I(inode)->i_sem);
+f2fs_up_write(&F2FS_I(inode)->i_sem);
 f2fs_put_page(dentry_page, 1);
@@ -858,7 +858,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
 struct page *page;
 int err = 0;
-down_write(&F2FS_I(inode)->i_sem);
+f2fs_down_write(&F2FS_I(inode)->i_sem);
 page = f2fs_init_inode_metadata(inode, dir, NULL, NULL);
 if (IS_ERR(page)) {
 err = PTR_ERR(page);
@@ -869,7 +869,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
 clear_inode_flag(inode, FI_NEW_INODE);
 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 fail:
-up_write(&F2FS_I(inode)->i_sem);
+f2fs_up_write(&F2FS_I(inode)->i_sem);
 return err;
 }
@@ -877,7 +877,7 @@ void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
 {
 struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
-down_write(&F2FS_I(inode)->i_sem);
+f2fs_down_write(&F2FS_I(inode)->i_sem);
 if (S_ISDIR(inode->i_mode))
 f2fs_i_links_write(dir, false);
@@ -888,7 +888,7 @@ void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
 f2fs_i_links_write(inode, false);
 f2fs_i_size_write(inode, 0);
 }
-up_write(&F2FS_I(inode)->i_sem);
+f2fs_up_write(&F2FS_I(inode)->i_sem);
 if (inode->i_nlink == 0)
 f2fs_add_orphan_inode(inode);

fs/f2fs/f2fs.h

@@ -123,6 +123,18 @@ typedef u32 nid_t;
 #define COMPRESS_EXT_NUM 16
+/*
+ * An implementation of an rwsem that is explicitly unfair to readers. This
+ * prevents priority inversion when a low-priority reader acquires the read lock
+ * while sleeping on the write lock but the write lock is needed by
+ * higher-priority clients.
+ */
+struct f2fs_rwsem {
+	struct rw_semaphore internal_rwsem;
+	wait_queue_head_t read_waiters;
+};
 struct f2fs_mount_info {
 unsigned int opt;
 int write_io_size_bits; /* Write IO size bits */
@@ -752,7 +764,7 @@ struct f2fs_inode_info {
 /* Use below internally in f2fs*/
 unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */
-struct rw_semaphore i_sem; /* protect fi info */
+struct f2fs_rwsem i_sem; /* protect fi info */
 atomic_t dirty_pages; /* # of dirty pages */
 f2fs_hash_t chash; /* hash value of given file name */
 unsigned int clevel; /* maximum level of given file name */
@@ -777,8 +789,8 @@ struct f2fs_inode_info {
 struct extent_tree *extent_tree; /* cached extent_tree entry */
 /* avoid racing between foreground op and gc */
-struct rw_semaphore i_gc_rwsem[2];
+struct f2fs_rwsem i_gc_rwsem[2];
-struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */
+struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */
 int i_extra_isize; /* size of extra space located in i_addr */
 kprojid_t i_projid; /* id for project quota */
@@ -904,7 +916,7 @@ struct f2fs_nm_info {
 /* NAT cache management */
 struct radix_tree_root nat_root;/* root of the nat entry cache */
 struct radix_tree_root nat_set_root;/* root of the nat set cache */
-struct rw_semaphore nat_tree_lock; /* protect nat entry tree */
+struct f2fs_rwsem nat_tree_lock; /* protect nat entry tree */
 struct list_head nat_entries; /* cached nat entry list (clean) */
 spinlock_t nat_list_lock; /* protect clean nat entry list */
 unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
@@ -1017,7 +1029,7 @@ struct f2fs_sm_info {
 struct dirty_seglist_info *dirty_info; /* dirty segment information */
 struct curseg_info *curseg_array; /* active segment information */
-struct rw_semaphore curseg_lock; /* for preventing curseg change */
+struct f2fs_rwsem curseg_lock; /* for preventing curseg change */
 block_t seg0_blkaddr; /* block address of 0'th segment */
 block_t main_blkaddr; /* start block address of main area */
@@ -1201,11 +1213,11 @@ struct f2fs_bio_info {
 struct bio *bio; /* bios to merge */
 sector_t last_block_in_bio; /* last block number */
 struct f2fs_io_info fio; /* store buffered io info. */
-struct rw_semaphore io_rwsem; /* blocking op for bio */
+struct f2fs_rwsem io_rwsem; /* blocking op for bio */
 spinlock_t io_lock; /* serialize DATA/NODE IOs */
 struct list_head io_list; /* track fios */
 struct list_head bio_list; /* bio entry list head */
-struct rw_semaphore bio_list_lock; /* lock to protect bio entry list */
+struct f2fs_rwsem bio_list_lock; /* lock to protect bio entry list */
 };
 #define FDEV(i) (sbi->devs[i])
@@ -1571,7 +1583,7 @@ struct f2fs_sb_info {
 struct super_block *sb; /* pointer to VFS super block */
 struct proc_dir_entry *s_proc; /* proc entry */
 struct f2fs_super_block *raw_super; /* raw super block pointer */
-struct rw_semaphore sb_lock; /* lock for raw super block */
+struct f2fs_rwsem sb_lock; /* lock for raw super block */
 int valid_super_block; /* valid super block no */
 unsigned long s_flag; /* flags for sbi */
 struct mutex writepages; /* mutex for writepages() */
@@ -1591,7 +1603,7 @@ struct f2fs_sb_info {
 /* for bio operations */
 struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */
 /* keep migration IO order for LFS mode */
-struct rw_semaphore io_order_lock;
+struct f2fs_rwsem io_order_lock;
 mempool_t *write_io_dummy; /* Dummy pages */
 /* for checkpoint */
@@ -1599,10 +1611,10 @@ struct f2fs_sb_info {
 int cur_cp_pack; /* remain current cp pack */
 spinlock_t cp_lock; /* for flag in ckpt */
 struct inode *meta_inode; /* cache meta blocks */
-struct rw_semaphore cp_global_sem; /* checkpoint procedure lock */
+struct f2fs_rwsem cp_global_sem; /* checkpoint procedure lock */
-struct rw_semaphore cp_rwsem; /* blocking FS operations */
+struct f2fs_rwsem cp_rwsem; /* blocking FS operations */
-struct rw_semaphore node_write; /* locking node writes */
+struct f2fs_rwsem node_write; /* locking node writes */
-struct rw_semaphore node_change; /* locking node change */
+struct f2fs_rwsem node_change; /* locking node change */
 wait_queue_head_t cp_wait;
 unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
 long interval_time[MAX_TIME]; /* to store thresholds */
@@ -1662,7 +1674,7 @@ struct f2fs_sb_info {
 block_t unusable_block_count; /* # of blocks saved by last cp */
 unsigned int nquota_files; /* # of quota sysfile */
-struct rw_semaphore quota_sem; /* blocking cp for flags */
+struct f2fs_rwsem quota_sem; /* blocking cp for flags */
 /* # of pages, see count_type */
 atomic_t nr_pages[NR_COUNT_TYPE];
@@ -1678,7 +1690,7 @@ struct f2fs_sb_info {
 struct f2fs_mount_info mount_opt; /* mount options */
 /* for cleaning operations */
-struct rw_semaphore gc_lock; /*
+struct f2fs_rwsem gc_lock; /*
 * semaphore for GC, avoid
 * race between GC and GC or CP
 */
@@ -1698,7 +1710,7 @@ struct f2fs_sb_info {
 /* threshold for gc trials on pinned files */
 u64 gc_pin_file_threshold;
-struct rw_semaphore pin_sem;
+struct f2fs_rwsem pin_sem;
 /* maximum # of trials to find a victim segment for SSR and GC */
 unsigned int max_victim_search;
@@ -2092,9 +2104,65 @@ static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
 spin_unlock_irqrestore(&sbi->cp_lock, flags);
 }
+static inline void init_f2fs_rwsem(struct f2fs_rwsem *sem)
+{
+	init_rwsem(&sem->internal_rwsem);
+	init_waitqueue_head(&sem->read_waiters);
+}
+static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
+{
+	return rwsem_is_locked(&sem->internal_rwsem);
+}
+static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
+{
+	return rwsem_is_contended(&sem->internal_rwsem);
+}
+static inline void f2fs_down_read(struct f2fs_rwsem *sem)
+{
+	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
+}
+static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
+{
+	return down_read_trylock(&sem->internal_rwsem);
+}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
+{
+	down_read_nested(&sem->internal_rwsem, subclass);
+}
+#else
+#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
+#endif
+static inline void f2fs_up_read(struct f2fs_rwsem *sem)
+{
+	up_read(&sem->internal_rwsem);
+}
+static inline void f2fs_down_write(struct f2fs_rwsem *sem)
+{
+	down_write(&sem->internal_rwsem);
+}
+static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
+{
+	return down_write_trylock(&sem->internal_rwsem);
+}
+static inline void f2fs_up_write(struct f2fs_rwsem *sem)
+{
+	up_write(&sem->internal_rwsem);
+	wake_up_all(&sem->read_waiters);
+}
 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
 {
-down_read(&sbi->cp_rwsem);
+f2fs_down_read(&sbi->cp_rwsem);
 }
 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
@@ -2103,22 +2171,22 @@ static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
 f2fs_show_injection_info(sbi, FAULT_LOCK_OP);
 return 0;
 }
-return down_read_trylock(&sbi->cp_rwsem);
+return f2fs_down_read_trylock(&sbi->cp_rwsem);
 }
 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
 {
-up_read(&sbi->cp_rwsem);
+f2fs_up_read(&sbi->cp_rwsem);
 }
 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
 {
-down_write(&sbi->cp_rwsem);
+f2fs_down_write(&sbi->cp_rwsem);
 }
 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
 {
-up_write(&sbi->cp_rwsem);
+f2fs_up_write(&sbi->cp_rwsem);
 }
 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)

fs/f2fs/file.c

@@ -237,13 +237,13 @@ static void try_to_fix_pino(struct inode *inode)
 struct f2fs_inode_info *fi = F2FS_I(inode);
 nid_t pino;
-down_write(&fi->i_sem);
+f2fs_down_write(&fi->i_sem);
 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
 get_parent_ino(inode, &pino)) {
 f2fs_i_pino_write(inode, pino);
 file_got_pino(inode);
 }
-up_write(&fi->i_sem);
+f2fs_up_write(&fi->i_sem);
 }
 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
@@ -318,9 +318,9 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
 * Both of fdatasync() and fsync() are able to be recovered from
 * sudden-power-off.
 */
-down_read(&F2FS_I(inode)->i_sem);
+f2fs_down_read(&F2FS_I(inode)->i_sem);
 cp_reason = need_do_checkpoint(inode);
-up_read(&F2FS_I(inode)->i_sem);
+f2fs_up_read(&F2FS_I(inode)->i_sem);
 if (cp_reason) {
 /* all the dirty node pages should be flushed for POR */
@@ -958,7 +958,7 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 return err;
 }
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 filemap_invalidate_lock(inode->i_mapping);
 truncate_setsize(inode, attr->ia_size);
@@ -970,7 +970,7 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 * larger than i_size.
 */
 filemap_invalidate_unlock(inode->i_mapping);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 if (err)
 return err;
@@ -1112,7 +1112,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 blk_start = (loff_t)pg_start << PAGE_SHIFT;
 blk_end = (loff_t)pg_end << PAGE_SHIFT;
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 filemap_invalidate_lock(inode->i_mapping);
 truncate_pagecache_range(inode, blk_start, blk_end - 1);
@@ -1122,7 +1122,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 f2fs_unlock_op(sbi);
 filemap_invalidate_unlock(inode->i_mapping);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 }
 }
@@ -1355,7 +1355,7 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 f2fs_balance_fs(sbi, true);
 /* avoid gc operation during block exchange */
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 filemap_invalidate_lock(inode->i_mapping);
 f2fs_lock_op(sbi);
@@ -1365,7 +1365,7 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 f2fs_unlock_op(sbi);
 filemap_invalidate_unlock(inode->i_mapping);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 return ret;
 }
@@ -1500,7 +1500,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 unsigned int end_offset;
 pgoff_t end;
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 filemap_invalidate_lock(mapping);
 truncate_pagecache_range(inode,
@@ -1514,7 +1514,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 if (ret) {
 f2fs_unlock_op(sbi);
 filemap_invalidate_unlock(mapping);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 goto out;
 }
@@ -1526,7 +1526,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 f2fs_unlock_op(sbi);
 filemap_invalidate_unlock(mapping);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 f2fs_balance_fs(sbi, dn.node_changed);
@@ -1600,7 +1600,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 /* avoid gc operation during block exchange */
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 filemap_invalidate_lock(mapping);
 truncate_pagecache(inode, offset);
@@ -1618,7 +1618,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 f2fs_unlock_op(sbi);
 }
 filemap_invalidate_unlock(mapping);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 /* write out all moved pages, if possible */
 filemap_invalidate_lock(mapping);
@@ -1674,13 +1674,13 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 next_alloc:
 if (has_not_enough_free_secs(sbi, 0,
 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
 err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
 if (err && err != -ENODATA && err != -EAGAIN)
 goto out_err;
 }
-down_write(&sbi->pin_sem);
+f2fs_down_write(&sbi->pin_sem);
 f2fs_lock_op(sbi);
 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
@@ -1690,7 +1690,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
 file_dont_truncate(inode);
-up_write(&sbi->pin_sem);
+f2fs_up_write(&sbi->pin_sem);
 expanded += map.m_len;
 sec_len -= map.m_len;
@@ -2020,7 +2020,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 if (ret)
 goto out;
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 /*
 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
@@ -2031,7 +2031,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 inode->i_ino, get_dirty_pages(inode));
 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
 if (ret) {
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 goto out;
 }
@@ -2044,7 +2044,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 /* add inode in inmem_list first and set atomic_file */
 set_inode_flag(inode, FI_ATOMIC_FILE);
 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 F2FS_I(inode)->inmem_task = current;
@@ -2351,7 +2351,7 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
 if (err)
 return err;
-down_write(&sbi->sb_lock);
+f2fs_down_write(&sbi->sb_lock);
 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
 goto got_it;
@@ -2370,7 +2370,7 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
 16))
 err = -EFAULT;
 out_err:
-up_write(&sbi->sb_lock);
+f2fs_up_write(&sbi->sb_lock);
 mnt_drop_write_file(filp);
 return err;
 }
@@ -2447,12 +2447,12 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
 return ret;
 if (!sync) {
-if (!down_write_trylock(&sbi->gc_lock)) {
+if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 ret = -EBUSY;
 goto out;
 }
 } else {
-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
 }
 ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
@@ -2483,12 +2483,12 @@ static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
 do_more:
 if (!range->sync) {
-if (!down_write_trylock(&sbi->gc_lock)) {
+if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 ret = -EBUSY;
 goto out;
 }
 } else {
-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
 }
 ret = f2fs_gc(sbi, range->sync, true, false,
@@ -2820,10 +2820,10 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
 f2fs_balance_fs(sbi, true);
-down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
 if (src != dst) {
 ret = -EBUSY;
-if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
+if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
 goto out_src;
 }
@@ -2841,9 +2841,9 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
 f2fs_unlock_op(sbi);
 if (src != dst)
-up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
 out_src:
-up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
 out_unlock:
 if (src != dst)
 inode_unlock(dst);
@@ -2938,7 +2938,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
 end_segno = min(start_segno + range.segments, dev_end_segno);
 while (start_segno < end_segno) {
-if (!down_write_trylock(&sbi->gc_lock)) {
+if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 ret = -EBUSY;
 goto out;
 }
@@ -3215,9 +3215,9 @@ int f2fs_precache_extents(struct inode *inode)
 while (map.m_lblk < end) {
 map.m_len = end - map.m_lblk;
-down_write(&fi->i_gc_rwsem[WRITE]);
+f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
-up_write(&fi->i_gc_rwsem[WRITE]);
+f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
 if (err)
 return err;
@@ -3294,11 +3294,11 @@ static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
 if (!vbuf)
 return -ENOMEM;
-down_read(&sbi->sb_lock);
+f2fs_down_read(&sbi->sb_lock);
 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
 ARRAY_SIZE(sbi->raw_super->volume_name),
 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
-up_read(&sbi->sb_lock);
+f2fs_up_read(&sbi->sb_lock);
 if (copy_to_user((char __user *)arg, vbuf,
 min(FSLABEL_MAX, count)))
@@ -3326,7 +3326,7 @@ static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
 if (err)
 goto out;
-down_write(&sbi->sb_lock);
+f2fs_down_write(&sbi->sb_lock);
 memset(sbi->raw_super->volume_name, 0,
 sizeof(sbi->raw_super->volume_name));
@@ -3336,7 +3336,7 @@ static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
 err = f2fs_commit_super(sbi, false);
-up_write(&sbi->sb_lock);
+f2fs_up_write(&sbi->sb_lock);
 mnt_drop_write_file(filp);
 out:
@@ -3462,7 +3462,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
 goto out;
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 filemap_invalidate_lock(inode->i_mapping);
 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
@@ -3499,7 +3499,7 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
 }
 filemap_invalidate_unlock(inode->i_mapping);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 out:
 inode_unlock(inode);
@@ -3615,7 +3615,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
 goto unlock_inode;
 }
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 filemap_invalidate_lock(inode->i_mapping);
 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
@@ -3652,7 +3652,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
 }
 filemap_invalidate_unlock(inode->i_mapping);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 if (ret >= 0) {
 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
@@ -3770,7 +3770,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
 if (ret)
 goto err;
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 filemap_invalidate_lock(mapping);
 ret = filemap_write_and_wait_range(mapping, range.start,
@@ -3859,7 +3859,7 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
 prev_block, len, range.flags);
 out:
 filemap_invalidate_unlock(mapping);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 err:
 inode_unlock(inode);
 file_end_write(filp);
@@ -4291,12 +4291,12 @@ static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
 trace_f2fs_direct_IO_enter(inode, iocb, count, READ);
 if (iocb->ki_flags & IOCB_NOWAIT) {
-if (!down_read_trylock(&fi->i_gc_rwsem[READ])) {
+if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
 ret = -EAGAIN;
 goto out;
 }
 } else {
-down_read(&fi->i_gc_rwsem[READ]);
+f2fs_down_read(&fi->i_gc_rwsem[READ]);
 }
 /*
@@ -4315,7 +4315,7 @@ static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
 ret = iomap_dio_complete(dio);
 }
-up_read(&fi->i_gc_rwsem[READ]);
+f2fs_up_read(&fi->i_gc_rwsem[READ]);
 file_accessed(file);
 out:
@@ -4497,12 +4497,12 @@ static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
 goto out;
 }
-if (!down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
+if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
 ret = -EAGAIN;
 goto out;
 }
-if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
+if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
-up_read(&fi->i_gc_rwsem[WRITE]);
+f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
 ret = -EAGAIN;
 goto out;
 }
@@ -4511,9 +4511,9 @@ static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
 if (ret)
 goto out;
-down_read(&fi->i_gc_rwsem[WRITE]);
+f2fs_down_read(&fi->i_gc_rwsem[WRITE]);
 if (do_opu)
-down_read(&fi->i_gc_rwsem[READ]);
+f2fs_down_read(&fi->i_gc_rwsem[READ]);
 }
 if (whint_mode == WHINT_MODE_OFF)
 iocb->ki_hint = WRITE_LIFE_NOT_SET;
@@ -4542,8 +4542,8 @@ static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
 if (whint_mode == WHINT_MODE_OFF)
 iocb->ki_hint = hint;
 if (do_opu)
-up_read(&fi->i_gc_rwsem[READ]);
+f2fs_up_read(&fi->i_gc_rwsem[READ]);
-up_read(&fi->i_gc_rwsem[WRITE]);
+f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
 if (ret < 0)
 goto out;
@@ -4644,12 +4644,12 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 /* Don't leave any preallocated blocks around past i_size. */
 if (preallocated && i_size_read(inode) < target_size) {
-down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 filemap_invalidate_lock(inode->i_mapping);
 if (!f2fs_truncate(inode))
 file_dont_truncate(inode);
 filemap_invalidate_unlock(inode->i_mapping);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 } else {
 file_dont_truncate(inode);
 }

fs/f2fs/gc.c

@@ -105,21 +105,21 @@ static int gc_thread_func(void *data)
 spin_unlock(&sbi->gc_urgent_high_lock);
 wait_ms = gc_th->urgent_sleep_time;
-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
 goto do_gc;
 }
 if (foreground) {
-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
 goto do_gc;
-} else if (!down_write_trylock(&sbi->gc_lock)) {
+} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 stat_other_skip_bggc_count(sbi);
 goto next;
 }
 if (!is_idle(sbi, GC_TIME)) {
 increase_sleep_time(gc_th, &wait_ms);
-up_write(&sbi->gc_lock);
+f2fs_up_write(&sbi->gc_lock);
 stat_io_skip_bggc_count(sbi);
 goto next;
 }
@@ -1230,7 +1230,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
 fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
 if (lfs_mode)
-down_write(&fio.sbi->io_order_lock);
+f2fs_down_write(&fio.sbi->io_order_lock);
 mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
 fio.old_blkaddr, false);
@@ -1316,7 +1316,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
 true, true, true);
 up_out:
 if (lfs_mode)
-up_write(&fio.sbi->io_order_lock);
+f2fs_up_write(&fio.sbi->io_order_lock);
 put_out:
 f2fs_put_dnode(&dn);
 out:
@@ -1475,7 +1475,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 special_file(inode->i_mode))
 continue;
-if (!down_write_trylock(
+if (!f2fs_down_write_trylock(
 &F2FS_I(inode)->i_gc_rwsem[WRITE])) {
 iput(inode);
 sbi->skipped_gc_rwsem++;
@@ -1488,7 +1488,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 if (f2fs_post_read_required(inode)) {
 int err = ra_data_block(inode, start_bidx);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 if (err) {
 iput(inode);
 continue;
@@ -1499,7 +1499,7 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 data_page = f2fs_get_read_data_page(inode,
 start_bidx, REQ_RAHEAD, true);
-up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 if (IS_ERR(data_page)) {
 iput(inode);
 continue;
@@ -1518,14 +1518,14 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 int err;
 if (S_ISREG(inode->i_mode)) {
-if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
+if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) {
 sbi->skipped_gc_rwsem++;
 continue;
 }
-if (!down_write_trylock(
+if (!f2fs_down_write_trylock(
 &fi->i_gc_rwsem[WRITE])) {
 sbi->skipped_gc_rwsem++;
-up_write(&fi->i_gc_rwsem[READ]);
+f2fs_up_write(&fi->i_gc_rwsem[READ]);
 continue;
 }
 locked = true;
@@ -1548,8 +1548,8 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 submitted++;
 if (locked) {
-up_write(&fi->i_gc_rwsem[WRITE]);
+f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
-up_write(&fi->i_gc_rwsem[READ]);
+f2fs_up_write(&fi->i_gc_rwsem[READ]);
 }
 stat_inc_data_blk_count(sbi, 1, gc_type);
@@ -1807,7 +1807,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
 reserved_segments(sbi),
 prefree_segments(sbi));
-up_write(&sbi->gc_lock);
+f2fs_up_write(&sbi->gc_lock);
 put_gc_inode(&gc_list);
@@ -1936,7 +1936,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
 long long block_count;
 int segs = secs * sbi->segs_per_sec;
-down_write(&sbi->sb_lock);
+f2fs_down_write(&sbi->sb_lock);
 section_count = le32_to_cpu(raw_sb->section_count);
 segment_count = le32_to_cpu(raw_sb->segment_count);
@@ -1957,7 +1957,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
 cpu_to_le32(dev_segs + segs);
 }
-up_write(&sbi->sb_lock);
+f2fs_up_write(&sbi->sb_lock);
 }
 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
@@ -2031,7 +2031,7 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
 secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
 /* stop other GC */
-if (!down_write_trylock(&sbi->gc_lock))
+if (!f2fs_down_write_trylock(&sbi->gc_lock))
 return -EAGAIN;
 /* stop CP to protect MAIN_SEC in free_segment_range */
@@ -2051,15 +2051,15 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
 out_unlock:
 f2fs_unlock_op(sbi);
-up_write(&sbi->gc_lock);
+f2fs_up_write(&sbi->gc_lock);
 if (err)
 return err;
 set_sbi_flag(sbi, SBI_IS_RESIZEFS);
 freeze_super(sbi->sb);
-down_write(&sbi->gc_lock);
+f2fs_down_write(&sbi->gc_lock);
-down_write(&sbi->cp_global_sem);
+f2fs_down_write(&sbi->cp_global_sem);
 spin_lock(&sbi->stat_lock);
 if (shrunk_blocks + valid_user_blocks(sbi) +
@@ -2104,8 +2104,8 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
 spin_unlock(&sbi->stat_lock);
 }
 out_err:
-up_write(&sbi->cp_global_sem);
+f2fs_up_write(&sbi->cp_global_sem);
-up_write(&sbi->gc_lock);
+f2fs_up_write(&sbi->gc_lock);
 thaw_super(sbi->sb);
 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
 return err;

fs/f2fs/inline.c

@@ -629,7 +629,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
 }
 if (inode) {
-down_write(&F2FS_I(inode)->i_sem);
+f2fs_down_write(&F2FS_I(inode)->i_sem);
 page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
 if (IS_ERR(page)) {
 err = PTR_ERR(page);
@@ -658,7 +658,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
 f2fs_update_parent_metadata(dir, inode, 0);
 fail:
 if (inode)
-up_write(&F2FS_I(inode)->i_sem);
+f2fs_up_write(&F2FS_I(inode)->i_sem);
 out:
 f2fs_put_page(ipage, 1);
 return err;

fs/f2fs/namei.c

@ -196,7 +196,7 @@ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *
__u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list; __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
int i, cold_count, hot_count; int i, cold_count, hot_count;
down_read(&sbi->sb_lock); f2fs_down_read(&sbi->sb_lock);
cold_count = le32_to_cpu(sbi->raw_super->extension_count); cold_count = le32_to_cpu(sbi->raw_super->extension_count);
hot_count = sbi->raw_super->hot_ext_count; hot_count = sbi->raw_super->hot_ext_count;
@ -206,7 +206,7 @@ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *
break; break;
} }
up_read(&sbi->sb_lock); f2fs_up_read(&sbi->sb_lock);
if (i == cold_count + hot_count) if (i == cold_count + hot_count)
return; return;
@ -299,19 +299,19 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
(!ext_cnt && !noext_cnt)) (!ext_cnt && !noext_cnt))
return; return;
down_read(&sbi->sb_lock); f2fs_down_read(&sbi->sb_lock);
cold_count = le32_to_cpu(sbi->raw_super->extension_count); cold_count = le32_to_cpu(sbi->raw_super->extension_count);
hot_count = sbi->raw_super->hot_ext_count; hot_count = sbi->raw_super->hot_ext_count;
for (i = cold_count; i < cold_count + hot_count; i++) { for (i = cold_count; i < cold_count + hot_count; i++) {
if (is_extension_exist(name, extlist[i], false)) { if (is_extension_exist(name, extlist[i], false)) {
up_read(&sbi->sb_lock); f2fs_up_read(&sbi->sb_lock);
return; return;
} }
} }
up_read(&sbi->sb_lock); f2fs_up_read(&sbi->sb_lock);
for (i = 0; i < noext_cnt; i++) { for (i = 0; i < noext_cnt; i++) {
if (is_extension_exist(name, noext[i], false)) { if (is_extension_exist(name, noext[i], false)) {
@@ -1023,11 +1023,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
new_page = NULL; new_page = NULL;
new_inode->i_ctime = current_time(new_inode); new_inode->i_ctime = current_time(new_inode);
down_write(&F2FS_I(new_inode)->i_sem); f2fs_down_write(&F2FS_I(new_inode)->i_sem);
if (old_dir_entry) if (old_dir_entry)
f2fs_i_links_write(new_inode, false); f2fs_i_links_write(new_inode, false);
f2fs_i_links_write(new_inode, false); f2fs_i_links_write(new_inode, false);
up_write(&F2FS_I(new_inode)->i_sem); f2fs_up_write(&F2FS_I(new_inode)->i_sem);
if (!new_inode->i_nlink) if (!new_inode->i_nlink)
f2fs_add_orphan_inode(new_inode); f2fs_add_orphan_inode(new_inode);
@@ -1048,13 +1048,13 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
f2fs_i_links_write(new_dir, true); f2fs_i_links_write(new_dir, true);
} }
down_write(&F2FS_I(old_inode)->i_sem); f2fs_down_write(&F2FS_I(old_inode)->i_sem);
if (!old_dir_entry || whiteout) if (!old_dir_entry || whiteout)
file_lost_pino(old_inode); file_lost_pino(old_inode);
else else
/* adjust dir's i_pino to pass fsck check */ /* adjust dir's i_pino to pass fsck check */
f2fs_i_pino_write(old_inode, new_dir->i_ino); f2fs_i_pino_write(old_inode, new_dir->i_ino);
up_write(&F2FS_I(old_inode)->i_sem); f2fs_up_write(&F2FS_I(old_inode)->i_sem);
old_inode->i_ctime = current_time(old_inode); old_inode->i_ctime = current_time(old_inode);
f2fs_mark_inode_dirty_sync(old_inode, false); f2fs_mark_inode_dirty_sync(old_inode, false);
@@ -1214,38 +1214,38 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
/* update directory entry info of old dir inode */ /* update directory entry info of old dir inode */
f2fs_set_link(old_dir, old_entry, old_page, new_inode); f2fs_set_link(old_dir, old_entry, old_page, new_inode);
down_write(&F2FS_I(old_inode)->i_sem); f2fs_down_write(&F2FS_I(old_inode)->i_sem);
if (!old_dir_entry) if (!old_dir_entry)
file_lost_pino(old_inode); file_lost_pino(old_inode);
else else
/* adjust dir's i_pino to pass fsck check */ /* adjust dir's i_pino to pass fsck check */
f2fs_i_pino_write(old_inode, new_dir->i_ino); f2fs_i_pino_write(old_inode, new_dir->i_ino);
up_write(&F2FS_I(old_inode)->i_sem); f2fs_up_write(&F2FS_I(old_inode)->i_sem);
old_dir->i_ctime = current_time(old_dir); old_dir->i_ctime = current_time(old_dir);
if (old_nlink) { if (old_nlink) {
down_write(&F2FS_I(old_dir)->i_sem); f2fs_down_write(&F2FS_I(old_dir)->i_sem);
f2fs_i_links_write(old_dir, old_nlink > 0); f2fs_i_links_write(old_dir, old_nlink > 0);
up_write(&F2FS_I(old_dir)->i_sem); f2fs_up_write(&F2FS_I(old_dir)->i_sem);
} }
f2fs_mark_inode_dirty_sync(old_dir, false); f2fs_mark_inode_dirty_sync(old_dir, false);
/* update directory entry info of new dir inode */ /* update directory entry info of new dir inode */
f2fs_set_link(new_dir, new_entry, new_page, old_inode); f2fs_set_link(new_dir, new_entry, new_page, old_inode);
down_write(&F2FS_I(new_inode)->i_sem); f2fs_down_write(&F2FS_I(new_inode)->i_sem);
if (!new_dir_entry) if (!new_dir_entry)
file_lost_pino(new_inode); file_lost_pino(new_inode);
else else
/* adjust dir's i_pino to pass fsck check */ /* adjust dir's i_pino to pass fsck check */
f2fs_i_pino_write(new_inode, old_dir->i_ino); f2fs_i_pino_write(new_inode, old_dir->i_ino);
up_write(&F2FS_I(new_inode)->i_sem); f2fs_up_write(&F2FS_I(new_inode)->i_sem);
new_dir->i_ctime = current_time(new_dir); new_dir->i_ctime = current_time(new_dir);
if (new_nlink) { if (new_nlink) {
down_write(&F2FS_I(new_dir)->i_sem); f2fs_down_write(&F2FS_I(new_dir)->i_sem);
f2fs_i_links_write(new_dir, new_nlink > 0); f2fs_i_links_write(new_dir, new_nlink > 0);
up_write(&F2FS_I(new_dir)->i_sem); f2fs_up_write(&F2FS_I(new_dir)->i_sem);
} }
f2fs_mark_inode_dirty_sync(new_dir, false); f2fs_mark_inode_dirty_sync(new_dir, false);

View file

@@ -382,14 +382,14 @@ int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
struct nat_entry *e; struct nat_entry *e;
bool need = false; bool need = false;
down_read(&nm_i->nat_tree_lock); f2fs_down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid); e = __lookup_nat_cache(nm_i, nid);
if (e) { if (e) {
if (!get_nat_flag(e, IS_CHECKPOINTED) && if (!get_nat_flag(e, IS_CHECKPOINTED) &&
!get_nat_flag(e, HAS_FSYNCED_INODE)) !get_nat_flag(e, HAS_FSYNCED_INODE))
need = true; need = true;
} }
up_read(&nm_i->nat_tree_lock); f2fs_up_read(&nm_i->nat_tree_lock);
return need; return need;
} }
@@ -399,11 +399,11 @@ bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
struct nat_entry *e; struct nat_entry *e;
bool is_cp = true; bool is_cp = true;
down_read(&nm_i->nat_tree_lock); f2fs_down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid); e = __lookup_nat_cache(nm_i, nid);
if (e && !get_nat_flag(e, IS_CHECKPOINTED)) if (e && !get_nat_flag(e, IS_CHECKPOINTED))
is_cp = false; is_cp = false;
up_read(&nm_i->nat_tree_lock); f2fs_up_read(&nm_i->nat_tree_lock);
return is_cp; return is_cp;
} }
@@ -413,13 +413,13 @@ bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
struct nat_entry *e; struct nat_entry *e;
bool need_update = true; bool need_update = true;
down_read(&nm_i->nat_tree_lock); f2fs_down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, ino); e = __lookup_nat_cache(nm_i, ino);
if (e && get_nat_flag(e, HAS_LAST_FSYNC) && if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
(get_nat_flag(e, IS_CHECKPOINTED) || (get_nat_flag(e, IS_CHECKPOINTED) ||
get_nat_flag(e, HAS_FSYNCED_INODE))) get_nat_flag(e, HAS_FSYNCED_INODE)))
need_update = false; need_update = false;
up_read(&nm_i->nat_tree_lock); f2fs_up_read(&nm_i->nat_tree_lock);
return need_update; return need_update;
} }
@@ -431,14 +431,14 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
struct nat_entry *new, *e; struct nat_entry *new, *e;
/* Let's mitigate lock contention of nat_tree_lock during checkpoint */ /* Let's mitigate lock contention of nat_tree_lock during checkpoint */
if (rwsem_is_locked(&sbi->cp_global_sem)) if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
return; return;
new = __alloc_nat_entry(sbi, nid, false); new = __alloc_nat_entry(sbi, nid, false);
if (!new) if (!new)
return; return;
down_write(&nm_i->nat_tree_lock); f2fs_down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid); e = __lookup_nat_cache(nm_i, nid);
if (!e) if (!e)
e = __init_nat_entry(nm_i, new, ne, false); e = __init_nat_entry(nm_i, new, ne, false);
@@ -447,7 +447,7 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
nat_get_blkaddr(e) != nat_get_blkaddr(e) !=
le32_to_cpu(ne->block_addr) || le32_to_cpu(ne->block_addr) ||
nat_get_version(e) != ne->version); nat_get_version(e) != ne->version);
up_write(&nm_i->nat_tree_lock); f2fs_up_write(&nm_i->nat_tree_lock);
if (e != new) if (e != new)
__free_nat_entry(new); __free_nat_entry(new);
} }
@@ -459,7 +459,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
struct nat_entry *e; struct nat_entry *e;
struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true); struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);
down_write(&nm_i->nat_tree_lock); f2fs_down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, ni->nid); e = __lookup_nat_cache(nm_i, ni->nid);
if (!e) { if (!e) {
e = __init_nat_entry(nm_i, new, NULL, true); e = __init_nat_entry(nm_i, new, NULL, true);
@@ -508,7 +508,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
set_nat_flag(e, HAS_FSYNCED_INODE, true); set_nat_flag(e, HAS_FSYNCED_INODE, true);
set_nat_flag(e, HAS_LAST_FSYNC, fsync_done); set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
} }
up_write(&nm_i->nat_tree_lock); f2fs_up_write(&nm_i->nat_tree_lock);
} }
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
@@ -516,7 +516,7 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
struct f2fs_nm_info *nm_i = NM_I(sbi); struct f2fs_nm_info *nm_i = NM_I(sbi);
int nr = nr_shrink; int nr = nr_shrink;
if (!down_write_trylock(&nm_i->nat_tree_lock)) if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
return 0; return 0;
spin_lock(&nm_i->nat_list_lock); spin_lock(&nm_i->nat_list_lock);
@@ -538,7 +538,7 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
} }
spin_unlock(&nm_i->nat_list_lock); spin_unlock(&nm_i->nat_list_lock);
up_write(&nm_i->nat_tree_lock); f2fs_up_write(&nm_i->nat_tree_lock);
return nr - nr_shrink; return nr - nr_shrink;
} }
@@ -560,13 +560,13 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
ni->nid = nid; ni->nid = nid;
retry: retry:
/* Check nat cache */ /* Check nat cache */
down_read(&nm_i->nat_tree_lock); f2fs_down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid); e = __lookup_nat_cache(nm_i, nid);
if (e) { if (e) {
ni->ino = nat_get_ino(e); ni->ino = nat_get_ino(e);
ni->blk_addr = nat_get_blkaddr(e); ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e); ni->version = nat_get_version(e);
up_read(&nm_i->nat_tree_lock); f2fs_up_read(&nm_i->nat_tree_lock);
return 0; return 0;
} }
@@ -576,11 +576,11 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
* nat_tree_lock. Therefore, we should retry, if we failed to grab here * nat_tree_lock. Therefore, we should retry, if we failed to grab here
* while not bothering checkpoint. * while not bothering checkpoint.
*/ */
if (!rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) { if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
down_read(&curseg->journal_rwsem); down_read(&curseg->journal_rwsem);
} else if (rwsem_is_contended(&nm_i->nat_tree_lock) || } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
!down_read_trylock(&curseg->journal_rwsem)) { !down_read_trylock(&curseg->journal_rwsem)) {
up_read(&nm_i->nat_tree_lock); f2fs_up_read(&nm_i->nat_tree_lock);
goto retry; goto retry;
} }
@@ -589,15 +589,15 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
ne = nat_in_journal(journal, i); ne = nat_in_journal(journal, i);
node_info_from_raw_nat(ni, &ne); node_info_from_raw_nat(ni, &ne);
} }
up_read(&curseg->journal_rwsem); up_read(&curseg->journal_rwsem);
if (i >= 0) { if (i >= 0) {
up_read(&nm_i->nat_tree_lock); f2fs_up_read(&nm_i->nat_tree_lock);
goto cache; goto cache;
} }
/* Fill node_info from nat page */ /* Fill node_info from nat page */
index = current_nat_addr(sbi, nid); index = current_nat_addr(sbi, nid);
up_read(&nm_i->nat_tree_lock); f2fs_up_read(&nm_i->nat_tree_lock);
page = f2fs_get_meta_page(sbi, index); page = f2fs_get_meta_page(sbi, index);
if (IS_ERR(page)) if (IS_ERR(page))
@@ -1609,17 +1609,17 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
goto redirty_out; goto redirty_out;
if (wbc->for_reclaim) { if (wbc->for_reclaim) {
if (!down_read_trylock(&sbi->node_write)) if (!f2fs_down_read_trylock(&sbi->node_write))
goto redirty_out; goto redirty_out;
} else { } else {
down_read(&sbi->node_write); f2fs_down_read(&sbi->node_write);
} }
/* This page is already truncated */ /* This page is already truncated */
if (unlikely(ni.blk_addr == NULL_ADDR)) { if (unlikely(ni.blk_addr == NULL_ADDR)) {
ClearPageUptodate(page); ClearPageUptodate(page);
dec_page_count(sbi, F2FS_DIRTY_NODES); dec_page_count(sbi, F2FS_DIRTY_NODES);
up_read(&sbi->node_write); f2fs_up_read(&sbi->node_write);
unlock_page(page); unlock_page(page);
return 0; return 0;
} }
@@ -1627,7 +1627,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
if (__is_valid_data_blkaddr(ni.blk_addr) && if (__is_valid_data_blkaddr(ni.blk_addr) &&
!f2fs_is_valid_blkaddr(sbi, ni.blk_addr, !f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
DATA_GENERIC_ENHANCE)) { DATA_GENERIC_ENHANCE)) {
up_read(&sbi->node_write); f2fs_up_read(&sbi->node_write);
goto redirty_out; goto redirty_out;
} }
@@ -1648,7 +1648,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
f2fs_do_write_node_page(nid, &fio); f2fs_do_write_node_page(nid, &fio);
set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
dec_page_count(sbi, F2FS_DIRTY_NODES); dec_page_count(sbi, F2FS_DIRTY_NODES);
up_read(&sbi->node_write); f2fs_up_read(&sbi->node_write);
if (wbc->for_reclaim) { if (wbc->for_reclaim) {
f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE); f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
@@ -2225,14 +2225,14 @@ bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
unsigned int i; unsigned int i;
bool ret = true; bool ret = true;
down_read(&nm_i->nat_tree_lock); f2fs_down_read(&nm_i->nat_tree_lock);
for (i = 0; i < nm_i->nat_blocks; i++) { for (i = 0; i < nm_i->nat_blocks; i++) {
if (!test_bit_le(i, nm_i->nat_block_bitmap)) { if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
ret = false; ret = false;
break; break;
} }
} }
up_read(&nm_i->nat_tree_lock); f2fs_up_read(&nm_i->nat_tree_lock);
return ret; return ret;
} }
@@ -2415,7 +2415,7 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
unsigned int i, idx; unsigned int i, idx;
nid_t nid; nid_t nid;
down_read(&nm_i->nat_tree_lock); f2fs_down_read(&nm_i->nat_tree_lock);
for (i = 0; i < nm_i->nat_blocks; i++) { for (i = 0; i < nm_i->nat_blocks; i++) {
if (!test_bit_le(i, nm_i->nat_block_bitmap)) if (!test_bit_le(i, nm_i->nat_block_bitmap))
@@ -2438,7 +2438,7 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
out: out:
scan_curseg_cache(sbi); scan_curseg_cache(sbi);
up_read(&nm_i->nat_tree_lock); f2fs_up_read(&nm_i->nat_tree_lock);
} }
static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
@@ -2473,7 +2473,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
META_NAT, true); META_NAT, true);
down_read(&nm_i->nat_tree_lock); f2fs_down_read(&nm_i->nat_tree_lock);
while (1) { while (1) {
if (!test_bit_le(NAT_BLOCK_OFFSET(nid), if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
@@ -2488,7 +2488,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
} }
if (ret) { if (ret) {
up_read(&nm_i->nat_tree_lock); f2fs_up_read(&nm_i->nat_tree_lock);
f2fs_err(sbi, "NAT is corrupt, run fsck to fix it"); f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
return ret; return ret;
} }
@@ -2508,7 +2508,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
/* find free nids from current sum_pages */ /* find free nids from current sum_pages */
scan_curseg_cache(sbi); scan_curseg_cache(sbi);
up_read(&nm_i->nat_tree_lock); f2fs_up_read(&nm_i->nat_tree_lock);
f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i->ra_nid_pages, META_NAT, false); nm_i->ra_nid_pages, META_NAT, false);
@@ -2953,7 +2953,7 @@ void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
struct f2fs_nm_info *nm_i = NM_I(sbi); struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned int nat_ofs; unsigned int nat_ofs;
down_read(&nm_i->nat_tree_lock); f2fs_down_read(&nm_i->nat_tree_lock);
for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) { for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
unsigned int valid = 0, nid_ofs = 0; unsigned int valid = 0, nid_ofs = 0;
@@ -2973,7 +2973,7 @@ void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
__update_nat_bits(nm_i, nat_ofs, valid); __update_nat_bits(nm_i, nat_ofs, valid);
} }
up_read(&nm_i->nat_tree_lock); f2fs_up_read(&nm_i->nat_tree_lock);
} }
static int __flush_nat_entry_set(struct f2fs_sb_info *sbi, static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
@@ -3071,15 +3071,15 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
* nat_cnt[DIRTY_NAT]. * nat_cnt[DIRTY_NAT].
*/ */
if (cpc->reason & CP_UMOUNT) { if (cpc->reason & CP_UMOUNT) {
down_write(&nm_i->nat_tree_lock); f2fs_down_write(&nm_i->nat_tree_lock);
remove_nats_in_journal(sbi); remove_nats_in_journal(sbi);
up_write(&nm_i->nat_tree_lock); f2fs_up_write(&nm_i->nat_tree_lock);
} }
if (!nm_i->nat_cnt[DIRTY_NAT]) if (!nm_i->nat_cnt[DIRTY_NAT])
return 0; return 0;
down_write(&nm_i->nat_tree_lock); f2fs_down_write(&nm_i->nat_tree_lock);
/* /*
* if there are no enough space in journal to store dirty nat * if there are no enough space in journal to store dirty nat
@@ -3108,7 +3108,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
break; break;
} }
up_write(&nm_i->nat_tree_lock); f2fs_up_write(&nm_i->nat_tree_lock);
/* Allow dirty nats by node block allocation in write_begin */ /* Allow dirty nats by node block allocation in write_begin */
return err; return err;
@@ -3228,7 +3228,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
mutex_init(&nm_i->build_lock); mutex_init(&nm_i->build_lock);
spin_lock_init(&nm_i->nid_list_lock); spin_lock_init(&nm_i->nid_list_lock);
init_rwsem(&nm_i->nat_tree_lock); init_f2fs_rwsem(&nm_i->nat_tree_lock);
nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
@@ -3334,7 +3334,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
spin_unlock(&nm_i->nid_list_lock); spin_unlock(&nm_i->nid_list_lock);
/* destroy nat cache */ /* destroy nat cache */
down_write(&nm_i->nat_tree_lock); f2fs_down_write(&nm_i->nat_tree_lock);
while ((found = __gang_lookup_nat_cache(nm_i, while ((found = __gang_lookup_nat_cache(nm_i,
nid, NATVEC_SIZE, natvec))) { nid, NATVEC_SIZE, natvec))) {
unsigned idx; unsigned idx;
@@ -3364,7 +3364,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
kmem_cache_free(nat_entry_set_slab, setvec[idx]); kmem_cache_free(nat_entry_set_slab, setvec[idx]);
} }
} }
up_write(&nm_i->nat_tree_lock); f2fs_up_write(&nm_i->nat_tree_lock);
kvfree(nm_i->nat_block_bitmap); kvfree(nm_i->nat_block_bitmap);
if (nm_i->free_nid_bitmap) { if (nm_i->free_nid_bitmap) {

View file

@@ -796,7 +796,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
INIT_LIST_HEAD(&dir_list); INIT_LIST_HEAD(&dir_list);
/* prevent checkpoint */ /* prevent checkpoint */
down_write(&sbi->cp_global_sem); f2fs_down_write(&sbi->cp_global_sem);
/* step #1: find fsynced inode numbers */ /* step #1: find fsynced inode numbers */
err = find_fsync_dnodes(sbi, &inode_list, check_only); err = find_fsync_dnodes(sbi, &inode_list, check_only);
@@ -845,7 +845,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
if (!err) if (!err)
clear_sbi_flag(sbi, SBI_POR_DOING); clear_sbi_flag(sbi, SBI_POR_DOING);
up_write(&sbi->cp_global_sem); f2fs_up_write(&sbi->cp_global_sem);
/* let's drop all the directory inodes for clean checkpoint */ /* let's drop all the directory inodes for clean checkpoint */
destroy_fsync_dnodes(&dir_list, err); destroy_fsync_dnodes(&dir_list, err);

View file

@@ -471,7 +471,7 @@ int f2fs_commit_inmem_pages(struct inode *inode)
f2fs_balance_fs(sbi, true); f2fs_balance_fs(sbi, true);
down_write(&fi->i_gc_rwsem[WRITE]); f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
f2fs_lock_op(sbi); f2fs_lock_op(sbi);
set_inode_flag(inode, FI_ATOMIC_COMMIT); set_inode_flag(inode, FI_ATOMIC_COMMIT);
@@ -483,7 +483,7 @@ int f2fs_commit_inmem_pages(struct inode *inode)
clear_inode_flag(inode, FI_ATOMIC_COMMIT); clear_inode_flag(inode, FI_ATOMIC_COMMIT);
f2fs_unlock_op(sbi); f2fs_unlock_op(sbi);
up_write(&fi->i_gc_rwsem[WRITE]); f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
return err; return err;
} }
@@ -521,7 +521,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
io_schedule(); io_schedule();
finish_wait(&sbi->gc_thread->fggc_wq, &wait); finish_wait(&sbi->gc_thread->fggc_wq, &wait);
} else { } else {
down_write(&sbi->gc_lock); f2fs_down_write(&sbi->gc_lock);
f2fs_gc(sbi, false, false, false, NULL_SEGNO); f2fs_gc(sbi, false, false, false, NULL_SEGNO);
} }
} }
@@ -529,7 +529,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi) static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
{ {
int factor = rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2; int factor = f2fs_rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS); unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA); unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES); unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
@@ -570,7 +570,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
/* there is background inflight IO or foreground operation recently */ /* there is background inflight IO or foreground operation recently */
if (is_inflight_io(sbi, REQ_TIME) || if (is_inflight_io(sbi, REQ_TIME) ||
(!f2fs_time_over(sbi, REQ_TIME) && rwsem_is_locked(&sbi->cp_rwsem))) (!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
return; return;
/* exceed periodical checkpoint timeout threshold */ /* exceed periodical checkpoint timeout threshold */
@@ -2821,7 +2821,7 @@ static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
if (!sbi->am.atgc_enabled) if (!sbi->am.atgc_enabled)
return; return;
down_read(&SM_I(sbi)->curseg_lock); f2fs_down_read(&SM_I(sbi)->curseg_lock);
mutex_lock(&curseg->curseg_mutex); mutex_lock(&curseg->curseg_mutex);
down_write(&SIT_I(sbi)->sentry_lock); down_write(&SIT_I(sbi)->sentry_lock);
@@ -2831,7 +2831,7 @@ static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
up_write(&SIT_I(sbi)->sentry_lock); up_write(&SIT_I(sbi)->sentry_lock);
mutex_unlock(&curseg->curseg_mutex); mutex_unlock(&curseg->curseg_mutex);
up_read(&SM_I(sbi)->curseg_lock); f2fs_up_read(&SM_I(sbi)->curseg_lock);
} }
void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi) void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
@@ -2982,7 +2982,7 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
struct curseg_info *curseg = CURSEG_I(sbi, type); struct curseg_info *curseg = CURSEG_I(sbi, type);
unsigned int segno; unsigned int segno;
down_read(&SM_I(sbi)->curseg_lock); f2fs_down_read(&SM_I(sbi)->curseg_lock);
mutex_lock(&curseg->curseg_mutex); mutex_lock(&curseg->curseg_mutex);
down_write(&SIT_I(sbi)->sentry_lock); down_write(&SIT_I(sbi)->sentry_lock);
@@ -3006,7 +3006,7 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
type, segno, curseg->segno); type, segno, curseg->segno);
mutex_unlock(&curseg->curseg_mutex); mutex_unlock(&curseg->curseg_mutex);
up_read(&SM_I(sbi)->curseg_lock); f2fs_up_read(&SM_I(sbi)->curseg_lock);
} }
static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type, static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
@@ -3038,23 +3038,23 @@ static void __allocate_new_section(struct f2fs_sb_info *sbi,
void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force) void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
{ {
down_read(&SM_I(sbi)->curseg_lock); f2fs_down_read(&SM_I(sbi)->curseg_lock);
down_write(&SIT_I(sbi)->sentry_lock); down_write(&SIT_I(sbi)->sentry_lock);
__allocate_new_section(sbi, type, force); __allocate_new_section(sbi, type, force);
up_write(&SIT_I(sbi)->sentry_lock); up_write(&SIT_I(sbi)->sentry_lock);
up_read(&SM_I(sbi)->curseg_lock); f2fs_up_read(&SM_I(sbi)->curseg_lock);
} }
void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
{ {
int i; int i;
down_read(&SM_I(sbi)->curseg_lock); f2fs_down_read(&SM_I(sbi)->curseg_lock);
down_write(&SIT_I(sbi)->sentry_lock); down_write(&SIT_I(sbi)->sentry_lock);
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
__allocate_new_segment(sbi, i, false, false); __allocate_new_segment(sbi, i, false, false);
up_write(&SIT_I(sbi)->sentry_lock); up_write(&SIT_I(sbi)->sentry_lock);
up_read(&SM_I(sbi)->curseg_lock); f2fs_up_read(&SM_I(sbi)->curseg_lock);
} }
static const struct segment_allocation default_salloc_ops = { static const struct segment_allocation default_salloc_ops = {
@@ -3192,9 +3192,9 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
if (sbi->discard_blks == 0) if (sbi->discard_blks == 0)
goto out; goto out;
down_write(&sbi->gc_lock); f2fs_down_write(&sbi->gc_lock);
err = f2fs_write_checkpoint(sbi, &cpc); err = f2fs_write_checkpoint(sbi, &cpc);
up_write(&sbi->gc_lock); f2fs_up_write(&sbi->gc_lock);
if (err) if (err)
goto out; goto out;
@@ -3431,7 +3431,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
bool from_gc = (type == CURSEG_ALL_DATA_ATGC); bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
struct seg_entry *se = NULL; struct seg_entry *se = NULL;
down_read(&SM_I(sbi)->curseg_lock); f2fs_down_read(&SM_I(sbi)->curseg_lock);
mutex_lock(&curseg->curseg_mutex); mutex_lock(&curseg->curseg_mutex);
down_write(&sit_i->sentry_lock); down_write(&sit_i->sentry_lock);
@@ -3514,7 +3514,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
mutex_unlock(&curseg->curseg_mutex); mutex_unlock(&curseg->curseg_mutex);
up_read(&SM_I(sbi)->curseg_lock); f2fs_up_read(&SM_I(sbi)->curseg_lock);
} }
void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
@@ -3550,7 +3550,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA); bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
if (keep_order) if (keep_order)
down_read(&fio->sbi->io_order_lock); f2fs_down_read(&fio->sbi->io_order_lock);
reallocate: reallocate:
f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr, f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
&fio->new_blkaddr, sum, type, fio); &fio->new_blkaddr, sum, type, fio);
@@ -3570,7 +3570,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1); f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
if (keep_order) if (keep_order)
up_read(&fio->sbi->io_order_lock); f2fs_up_read(&fio->sbi->io_order_lock);
} }
void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
@@ -3705,7 +3705,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
se = get_seg_entry(sbi, segno); se = get_seg_entry(sbi, segno);
type = se->type; type = se->type;
down_write(&SM_I(sbi)->curseg_lock); f2fs_down_write(&SM_I(sbi)->curseg_lock);
if (!recover_curseg) { if (!recover_curseg) {
/* for recovery flow */ /* for recovery flow */
@@ -3774,7 +3774,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
up_write(&sit_i->sentry_lock); up_write(&sit_i->sentry_lock);
mutex_unlock(&curseg->curseg_mutex); mutex_unlock(&curseg->curseg_mutex);
up_write(&SM_I(sbi)->curseg_lock); f2fs_up_write(&SM_I(sbi)->curseg_lock);
} }
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
@@ -5258,7 +5258,7 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
INIT_LIST_HEAD(&sm_info->sit_entry_set); INIT_LIST_HEAD(&sm_info->sit_entry_set);
init_rwsem(&sm_info->curseg_lock); init_f2fs_rwsem(&sm_info->curseg_lock);
if (!f2fs_readonly(sbi->sb)) { if (!f2fs_readonly(sbi->sb)) {
err = f2fs_create_flush_cmd_control(sbi); err = f2fs_create_flush_cmd_control(sbi);

View file

@@ -1355,16 +1355,16 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
/* Initialize f2fs-specific inode info */ /* Initialize f2fs-specific inode info */
atomic_set(&fi->dirty_pages, 0); atomic_set(&fi->dirty_pages, 0);
atomic_set(&fi->i_compr_blocks, 0); atomic_set(&fi->i_compr_blocks, 0);
init_rwsem(&fi->i_sem); init_f2fs_rwsem(&fi->i_sem);
spin_lock_init(&fi->i_size_lock); spin_lock_init(&fi->i_size_lock);
INIT_LIST_HEAD(&fi->dirty_list); INIT_LIST_HEAD(&fi->dirty_list);
INIT_LIST_HEAD(&fi->gdirty_list); INIT_LIST_HEAD(&fi->gdirty_list);
INIT_LIST_HEAD(&fi->inmem_ilist); INIT_LIST_HEAD(&fi->inmem_ilist);
INIT_LIST_HEAD(&fi->inmem_pages); INIT_LIST_HEAD(&fi->inmem_pages);
mutex_init(&fi->inmem_lock); mutex_init(&fi->inmem_lock);
init_rwsem(&fi->i_gc_rwsem[READ]); init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
init_rwsem(&fi->i_gc_rwsem[WRITE]); init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
init_rwsem(&fi->i_xattr_sem); init_f2fs_rwsem(&fi->i_xattr_sem);
/* Will be used by directory only */ /* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level; fi->i_dir_level = F2FS_SB(sb)->dir_level;
@@ -2088,7 +2088,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
f2fs_update_time(sbi, DISABLE_TIME); f2fs_update_time(sbi, DISABLE_TIME);
while (!f2fs_time_over(sbi, DISABLE_TIME)) { while (!f2fs_time_over(sbi, DISABLE_TIME)) {
down_write(&sbi->gc_lock); f2fs_down_write(&sbi->gc_lock);
err = f2fs_gc(sbi, true, false, false, NULL_SEGNO); err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
if (err == -ENODATA) { if (err == -ENODATA) {
err = 0; err = 0;
@@ -2110,7 +2110,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
goto restore_flag; goto restore_flag;
} }
down_write(&sbi->gc_lock); f2fs_down_write(&sbi->gc_lock);
cpc.reason = CP_PAUSE; cpc.reason = CP_PAUSE;
set_sbi_flag(sbi, SBI_CP_DISABLED); set_sbi_flag(sbi, SBI_CP_DISABLED);
err = f2fs_write_checkpoint(sbi, &cpc); err = f2fs_write_checkpoint(sbi, &cpc);
@@ -2122,7 +2122,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
spin_unlock(&sbi->stat_lock); spin_unlock(&sbi->stat_lock);
out_unlock: out_unlock:
up_write(&sbi->gc_lock); f2fs_up_write(&sbi->gc_lock);
restore_flag: restore_flag:
sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
return err; return err;
@@ -2142,12 +2142,12 @@ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
if (unlikely(retry < 0)) if (unlikely(retry < 0))
f2fs_warn(sbi, "checkpoint=enable has some unwritten data."); f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");
down_write(&sbi->gc_lock); f2fs_down_write(&sbi->gc_lock);
f2fs_dirty_to_prefree(sbi); f2fs_dirty_to_prefree(sbi);
clear_sbi_flag(sbi, SBI_CP_DISABLED); clear_sbi_flag(sbi, SBI_CP_DISABLED);
set_sbi_flag(sbi, SBI_IS_DIRTY); set_sbi_flag(sbi, SBI_IS_DIRTY);
up_write(&sbi->gc_lock); f2fs_up_write(&sbi->gc_lock);
f2fs_sync_fs(sbi->sb, 1); f2fs_sync_fs(sbi->sb, 1);
} }
@@ -2707,18 +2707,18 @@ int f2fs_quota_sync(struct super_block *sb, int type)
/* /*
* do_quotactl * do_quotactl
* f2fs_quota_sync * f2fs_quota_sync
* down_read(quota_sem) * f2fs_down_read(quota_sem)
* dquot_writeback_dquots() * dquot_writeback_dquots()
* f2fs_dquot_commit * f2fs_dquot_commit
* block_operation * block_operation
* down_read(quota_sem) * f2fs_down_read(quota_sem)
*/ */
f2fs_lock_op(sbi); f2fs_lock_op(sbi);
down_read(&sbi->quota_sem); f2fs_down_read(&sbi->quota_sem);
ret = f2fs_quota_sync_file(sbi, cnt); ret = f2fs_quota_sync_file(sbi, cnt);
up_read(&sbi->quota_sem); f2fs_up_read(&sbi->quota_sem);
f2fs_unlock_op(sbi); f2fs_unlock_op(sbi);
inode_unlock(dqopt->files[cnt]); inode_unlock(dqopt->files[cnt]);
@@ -2843,11 +2843,11 @@ static int f2fs_dquot_commit(struct dquot *dquot)
struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
int ret; int ret;
down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING); f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
ret = dquot_commit(dquot); ret = dquot_commit(dquot);
if (ret < 0) if (ret < 0)
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
up_read(&sbi->quota_sem); f2fs_up_read(&sbi->quota_sem);
return ret; return ret;
} }
@@ -2856,11 +2856,11 @@ static int f2fs_dquot_acquire(struct dquot *dquot)
struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
int ret; int ret;
down_read(&sbi->quota_sem); f2fs_down_read(&sbi->quota_sem);
ret = dquot_acquire(dquot); ret = dquot_acquire(dquot);
if (ret < 0) if (ret < 0)
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
up_read(&sbi->quota_sem); f2fs_up_read(&sbi->quota_sem);
return ret; return ret;
} }
@@ -3601,14 +3601,14 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
INIT_LIST_HEAD(&sbi->s_list); INIT_LIST_HEAD(&sbi->s_list);
mutex_init(&sbi->umount_mutex); mutex_init(&sbi->umount_mutex);
init_rwsem(&sbi->io_order_lock); init_f2fs_rwsem(&sbi->io_order_lock);
spin_lock_init(&sbi->cp_lock); spin_lock_init(&sbi->cp_lock);
sbi->dirty_device = 0; sbi->dirty_device = 0;
spin_lock_init(&sbi->dev_lock); spin_lock_init(&sbi->dev_lock);
init_rwsem(&sbi->sb_lock); init_f2fs_rwsem(&sbi->sb_lock);
init_rwsem(&sbi->pin_sem); init_f2fs_rwsem(&sbi->pin_sem);
} }
static int init_percpu_info(struct f2fs_sb_info *sbi) static int init_percpu_info(struct f2fs_sb_info *sbi)
@@ -4067,11 +4067,11 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
/* init f2fs-specific super block info */ /* init f2fs-specific super block info */
sbi->valid_super_block = valid_super_block; sbi->valid_super_block = valid_super_block;
init_rwsem(&sbi->gc_lock); init_f2fs_rwsem(&sbi->gc_lock);
mutex_init(&sbi->writepages); mutex_init(&sbi->writepages);
init_rwsem(&sbi->cp_global_sem); init_f2fs_rwsem(&sbi->cp_global_sem);
init_rwsem(&sbi->node_write); init_f2fs_rwsem(&sbi->node_write);
init_rwsem(&sbi->node_change); init_f2fs_rwsem(&sbi->node_change);
/* disallow all the data/node/meta page writes */ /* disallow all the data/node/meta page writes */
set_sbi_flag(sbi, SBI_POR_DOING); set_sbi_flag(sbi, SBI_POR_DOING);
@@ -4092,18 +4092,18 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
} }
for (j = HOT; j < n; j++) { for (j = HOT; j < n; j++) {
init_rwsem(&sbi->write_io[i][j].io_rwsem); init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
sbi->write_io[i][j].sbi = sbi; sbi->write_io[i][j].sbi = sbi;
sbi->write_io[i][j].bio = NULL; sbi->write_io[i][j].bio = NULL;
spin_lock_init(&sbi->write_io[i][j].io_lock); spin_lock_init(&sbi->write_io[i][j].io_lock);
INIT_LIST_HEAD(&sbi->write_io[i][j].io_list); INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list); INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
init_rwsem(&sbi->write_io[i][j].bio_list_lock); init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
} }
} }
init_rwsem(&sbi->cp_rwsem); init_f2fs_rwsem(&sbi->cp_rwsem);
init_rwsem(&sbi->quota_sem); init_f2fs_rwsem(&sbi->quota_sem);
init_waitqueue_head(&sbi->cp_wait); init_waitqueue_head(&sbi->cp_wait);
init_sb_info(sbi); init_sb_info(sbi);

View file

@@ -363,7 +363,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
if (!strlen(name) || strlen(name) >= F2FS_EXTENSION_LEN) if (!strlen(name) || strlen(name) >= F2FS_EXTENSION_LEN)
return -EINVAL; return -EINVAL;
down_write(&sbi->sb_lock); f2fs_down_write(&sbi->sb_lock);
ret = f2fs_update_extension_list(sbi, name, hot, set); ret = f2fs_update_extension_list(sbi, name, hot, set);
if (ret) if (ret)
@@ -373,7 +373,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
if (ret) if (ret)
f2fs_update_extension_list(sbi, name, hot, !set); f2fs_update_extension_list(sbi, name, hot, !set);
out: out:
up_write(&sbi->sb_lock); f2fs_up_write(&sbi->sb_lock);
return ret ? ret : count; return ret ? ret : count;
} }

View file

@@ -208,7 +208,7 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc,
* from re-instantiating cached pages we are truncating (since unlike * from re-instantiating cached pages we are truncating (since unlike
* normal file accesses, garbage collection isn't limited by i_size). * normal file accesses, garbage collection isn't limited by i_size).
*/ */
down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
truncate_inode_pages(inode->i_mapping, inode->i_size); truncate_inode_pages(inode->i_mapping, inode->i_size);
err2 = f2fs_truncate(inode); err2 = f2fs_truncate(inode);
if (err2) { if (err2) {
@@ -216,7 +216,7 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc,
err2); err2);
set_sbi_flag(sbi, SBI_NEED_FSCK); set_sbi_flag(sbi, SBI_NEED_FSCK);
} }
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
clear_inode_flag(inode, FI_VERITY_IN_PROGRESS); clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
return err ?: err2; return err ?: err2;
} }

View file

@@ -525,10 +525,10 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
if (len > F2FS_NAME_LEN) if (len > F2FS_NAME_LEN)
return -ERANGE; return -ERANGE;
down_read(&F2FS_I(inode)->i_xattr_sem); f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
error = lookup_all_xattrs(inode, ipage, index, len, name, error = lookup_all_xattrs(inode, ipage, index, len, name,
&entry, &base_addr, &base_size, &is_inline); &entry, &base_addr, &base_size, &is_inline);
up_read(&F2FS_I(inode)->i_xattr_sem); f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
if (error) if (error)
return error; return error;
@@ -562,9 +562,9 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
int error; int error;
size_t rest = buffer_size; size_t rest = buffer_size;
down_read(&F2FS_I(inode)->i_xattr_sem); f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
error = read_all_xattrs(inode, NULL, &base_addr); error = read_all_xattrs(inode, NULL, &base_addr);
up_read(&F2FS_I(inode)->i_xattr_sem); f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
if (error) if (error)
return error; return error;
@@ -786,9 +786,9 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
f2fs_balance_fs(sbi, true); f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi); f2fs_lock_op(sbi);
down_write(&F2FS_I(inode)->i_xattr_sem); f2fs_down_write(&F2FS_I(inode)->i_xattr_sem);
err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags); err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags);
up_write(&F2FS_I(inode)->i_xattr_sem); f2fs_up_write(&F2FS_I(inode)->i_xattr_sem);
f2fs_unlock_op(sbi); f2fs_unlock_op(sbi);
f2fs_update_time(sbi, REQ_TIME); f2fs_update_time(sbi, REQ_TIME);
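
The f2fs_rwsem type and the helpers used throughout these hunks (init_f2fs_rwsem, f2fs_down_read, f2fs_down_read_trylock, f2fs_down_write, f2fs_down_write_trylock, f2fs_up_read, f2fs_up_write, f2fs_rwsem_is_locked, f2fs_rwsem_is_contended) are introduced in the f2fs header changes that are not part of this excerpt. As a rough, non-authoritative sketch of how a reader-unfair wrapper can be built from a plain rw_semaphore plus a waitqueue (names and semantics inferred from the call sites above; the lockdep plumbing such as f2fs_down_read_nested and per-lock class keys is omitted):

struct f2fs_rwsem {
        struct rw_semaphore internal_rwsem;
        wait_queue_head_t read_waiters;
};

static inline void init_f2fs_rwsem(struct f2fs_rwsem *sem)
{
        /* The real helper would also thread a lock_class_key through for lockdep. */
        init_rwsem(&sem->internal_rwsem);
        init_waitqueue_head(&sem->read_waiters);
}

static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
{
        return rwsem_is_locked(&sem->internal_rwsem);
}

static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
{
        return rwsem_is_contended(&sem->internal_rwsem);
}

static inline void f2fs_down_read(struct f2fs_rwsem *sem)
{
        /* Readers only enter via trylock, so they park whenever a writer holds or waits. */
        wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
}

static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
{
        return down_read_trylock(&sem->internal_rwsem);
}

static inline void f2fs_up_read(struct f2fs_rwsem *sem)
{
        up_read(&sem->internal_rwsem);
}

static inline void f2fs_down_write(struct f2fs_rwsem *sem)
{
        down_write(&sem->internal_rwsem);
}

static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
{
        return down_write_trylock(&sem->internal_rwsem);
}

static inline void f2fs_up_write(struct f2fs_rwsem *sem)
{
        up_write(&sem->internal_rwsem);
        /* Let readers parked in f2fs_down_read() retry their trylock. */
        wake_up_all(&sem->read_waiters);
}

Because f2fs_down_read() can only succeed through down_read_trylock(), newly arriving readers never queue ahead of a waiting writer; f2fs_up_write() then wakes the parked readers once the writer releases the lock.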