f2fs: compress: fix potential deadlock of compress file

commit 7377e85396 upstream.

There is a potential deadlock between the writeback process and a process
performing write_begin() or write_cache_pages() while both are trying to
write the same compressed file whose data turns out not to be compressible,
as below (a minimal user-space sketch of the lock inversion follows the
diagram):

[Process A] - doing checkpoint
[Process B]                     [Process C]
f2fs_write_cache_pages()
- lock_page() [all pages in cluster, 0-31]
- f2fs_write_multi_pages()
 - f2fs_write_raw_pages()
  - f2fs_write_single_data_page()
   - f2fs_do_write_data_page()
     - return -EAGAIN [f2fs_trylock_op() failed]
   - unlock_page(page) [e.g., page 0]
                                - generic_perform_write()
                                 - f2fs_write_begin()
                                  - f2fs_prepare_compress_overwrite()
                                   - prepare_compress_overwrite()
                                    - lock_page() [e.g., page 0]
                                    - lock_page() [e.g., page 1]
   - lock_page(page) [e.g., page 0]
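
The diagram is a classic ABBA inversion: Process B re-takes page 0 while
still holding page 1, and Process C holds page 0 while waiting for page 1.
As a rough illustration only, here is a user-space analogue with pthread
mutexes standing in for the two page locks; none of the names below are
f2fs code, and the program is expected to hang by design:

/* NOT f2fs code: user-space analogue of the deadlock above.
 * Two pthread mutexes stand in for the locks on page 0 and page 1. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t page0 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t page1 = PTHREAD_MUTEX_INITIALIZER;

static void *writeback(void *unused)	/* Process B */
{
	pthread_mutex_lock(&page0);	/* lock_page() on the cluster */
	pthread_mutex_lock(&page1);
	pthread_mutex_unlock(&page0);	/* unlock_page(page 0) on -EAGAIN */
	sleep(1);			/* window for Process C to run */
	printf("B: retrying page 0, still holding page 1\n");
	pthread_mutex_lock(&page0);	/* blocks: C holds page 0 */
	pthread_mutex_unlock(&page0);
	pthread_mutex_unlock(&page1);
	return NULL;
}

static void *overwrite(void *unused)	/* Process C */
{
	pthread_mutex_lock(&page0);	/* prepare_compress_overwrite() */
	printf("C: got page 0, waiting for page 1\n");
	pthread_mutex_lock(&page1);	/* blocks: B holds page 1 */
	pthread_mutex_unlock(&page1);
	pthread_mutex_unlock(&page0);
	return NULL;
}

int main(void)
{
	pthread_t b, c;

	pthread_create(&b, NULL, writeback, NULL);
	usleep(100 * 1000);		/* let B take both locks first */
	pthread_create(&c, NULL, overwrite, NULL);
	sleep(3);
	printf("no further progress: B and C are deadlocked\n");
	return 0;
}

Built with e.g. "gcc -pthread demo.c", the run prints both progress
messages and then stalls until main() gives up, mirroring the stuck
writeback and overwrite tasks.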

Since no compression is performed in this path, it is no longer necessary
to hold the locks on every page of the cluster within
f2fs_write_raw_pages().

This patch changes f2fs_write_raw_pages() to release all page locks first
and then perform the writes one page at a time, the same way
f2fs_write_cache_pages() handles non-compressed files (see the second
sketch below).
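
To illustrate why this is enough, here is the same user-space analogue
under the patched discipline (again a sketch, not f2fs code): the
writeback side redirties and unlocks the entire cluster up front, then
takes only one page lock at a time for the actual writes, so the retry
path never waits for one page while holding another:

/* NOT f2fs code: the analogue above with the patched locking discipline. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page0 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t page1 = PTHREAD_MUTEX_INITIALIZER;

static void *writeback(void *unused)	/* Process B, patched */
{
	/* f2fs_write_raw_pages() now unlocks the whole cluster first... */
	pthread_mutex_lock(&page0);
	pthread_mutex_lock(&page1);
	pthread_mutex_unlock(&page0);	/* redirty + unlock_page() */
	pthread_mutex_unlock(&page1);

	/* ...then writes each page under its own lock only. */
	pthread_mutex_lock(&page0);
	printf("B: wrote page 0\n");
	pthread_mutex_unlock(&page0);
	pthread_mutex_lock(&page1);
	printf("B: wrote page 1\n");
	pthread_mutex_unlock(&page1);
	return NULL;
}

static void *overwrite(void *unused)	/* Process C, unchanged */
{
	pthread_mutex_lock(&page0);
	pthread_mutex_lock(&page1);
	printf("C: prepared overwrite of pages 0-1\n");
	pthread_mutex_unlock(&page1);
	pthread_mutex_unlock(&page0);
	return NULL;
}

int main(void)
{
	pthread_t b, c;

	pthread_create(&b, NULL, writeback, NULL);
	pthread_create(&c, NULL, overwrite, NULL);
	pthread_join(b, NULL);
	pthread_join(c, NULL);
	printf("both finished: no deadlock\n");
	return 0;
}

The two runs differ only in the writeback thread: with every multi-page
acquisition done in ascending page order and the write loop holding a
single page lock at a time, no lock cycle can form.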

Fixes: 4c8ff7095b ("f2fs: support data compression")
Signed-off-by: Hyeong-Jun Kim <hj514.kim@samsung.com>
Signed-off-by: Sungjong Seo <sj1557.seo@samsung.com>
Signed-off-by: Youngjin Gil <youngjin.gil@samsung.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1448,25 +1448,38 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
 					enum iostat_type io_type)
 {
 	struct address_space *mapping = cc->inode->i_mapping;
-	int _submitted, compr_blocks, ret;
-	int i = -1, err = 0;
+	int _submitted, compr_blocks, ret, i;
 
 	compr_blocks = f2fs_compressed_blocks(cc);
-	if (compr_blocks < 0) {
-		err = compr_blocks;
-		goto out_err;
+
+	for (i = 0; i < cc->cluster_size; i++) {
+		if (!cc->rpages[i])
+			continue;
+
+		redirty_page_for_writepage(wbc, cc->rpages[i]);
+		unlock_page(cc->rpages[i]);
 	}
 
+	if (compr_blocks < 0)
+		return compr_blocks;
+
 	for (i = 0; i < cc->cluster_size; i++) {
 		if (!cc->rpages[i])
 			continue;
 retry_write:
+		lock_page(cc->rpages[i]);
+
 		if (cc->rpages[i]->mapping != mapping) {
+continue_unlock:
 			unlock_page(cc->rpages[i]);
 			continue;
 		}
 
-		BUG_ON(!PageLocked(cc->rpages[i]));
+		if (!PageDirty(cc->rpages[i]))
+			goto continue_unlock;
+
+		if (!clear_page_dirty_for_io(cc->rpages[i]))
+			goto continue_unlock;
 
 		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
 						NULL, NULL, wbc, io_type,
@@ -1481,26 +1494,15 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
 				 * avoid deadlock caused by cluster update race
 				 * from foreground operation.
 				 */
-				if (IS_NOQUOTA(cc->inode)) {
-					err = 0;
-					goto out_err;
-				}
+				if (IS_NOQUOTA(cc->inode))
+					return 0;
 				ret = 0;
 				cond_resched();
 				congestion_wait(BLK_RW_ASYNC,
 						DEFAULT_IO_TIMEOUT);
-				lock_page(cc->rpages[i]);
-
-				if (!PageDirty(cc->rpages[i])) {
-					unlock_page(cc->rpages[i]);
-					continue;
-				}
-
-				clear_page_dirty_for_io(cc->rpages[i]);
 				goto retry_write;
 			}
-			err = ret;
-			goto out_err;
+			return ret;
 		}
 
 		*submitted += _submitted;
@@ -1509,14 +1511,6 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
 
 	f2fs_balance_fs(F2FS_M_SB(mapping), true);
 	return 0;
-out_err:
-	for (++i; i < cc->cluster_size; i++) {
-		if (!cc->rpages[i])
-			continue;
-		redirty_page_for_writepage(wbc, cc->rpages[i]);
-		unlock_page(cc->rpages[i]);
-	}
-	return err;
 }
 
 int f2fs_write_multi_pages(struct compress_ctx *cc,