btrfs: reloc: factor out relocation page read and dirty part

In function relocate_file_extent_cluster(), we have a big loop for
marking all involved pages delalloc.

That part is long enough to deserve its own function, so this patch
moves that code chunk into a new function, relocate_one_page().

This also provides enough space for later subpage work.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
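
For illustration only, here is a minimal user-space C sketch of the pattern this refactor follows: the per-page body of the loop becomes a helper that owns its own reservation and error cleanup (via goto labels), and the caller collapses to a short for loop that stops on the first error. All names in the sketch (demo_cluster, relocate_one_item, and so on) are hypothetical and are not part of the btrfs code.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct file_extent_cluster. */
struct demo_cluster {
	unsigned long start;
	unsigned long end;	/* inclusive last index */
	int nr;			/* expected number of boundaries */
	int hit;		/* boundaries actually seen, like cluster_nr */
};

/*
 * Per-item helper: reserve, do the work, and release the reservation on
 * the error path before returning, mirroring the release_page /
 * release_delalloc labels in relocate_one_page().
 */
static int relocate_one_item(struct demo_cluster *cluster, unsigned long index)
{
	void *reservation = malloc(16);	/* models the delalloc reservation */
	int ret = 0;

	if (!reservation)
		return -1;

	if (index == 3) {		/* pretend this "page" fails to read */
		ret = -1;
		goto release_reservation;
	}
	cluster->hit++;			/* models the boundary accounting */

release_reservation:
	free(reservation);
	return ret;
}

int main(void)
{
	struct demo_cluster cluster = { .start = 0, .end = 5, .nr = 6 };
	unsigned long index;
	int ret = 0;

	/* Caller loop mirrors the new relocate_file_extent_cluster() shape. */
	for (index = cluster.start; index <= cluster.end && !ret; index++)
		ret = relocate_one_item(&cluster, index);

	if (ret == 0 && cluster.hit != cluster.nr)
		fprintf(stderr, "boundary accounting mismatch\n");
	printf("ret = %d, stopped after index %lu\n", ret, index - 1);
	return ret ? 1 : 0;
}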

@@ -2886,19 +2886,102 @@ noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
}
ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);

static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
			     struct file_extent_cluster *cluster,
			     int *cluster_nr, unsigned long page_index)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 offset = BTRFS_I(inode)->index_cnt;
	const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	struct page *page;
	u64 page_start;
	u64 page_end;
	int ret;

	ASSERT(page_index <= last_index);
	ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), PAGE_SIZE);
	if (ret)
		return ret;

	page = find_lock_page(inode->i_mapping, page_index);
	if (!page) {
		page_cache_sync_readahead(inode->i_mapping, ra, NULL,
				page_index, last_index + 1 - page_index);
		page = find_or_create_page(inode->i_mapping, page_index, mask);
		if (!page) {
			ret = -ENOMEM;
			goto release_delalloc;
		}
	}
	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto release_page;

	if (PageReadahead(page))
		page_cache_async_readahead(inode->i_mapping, ra, NULL, page,
				page_index, last_index + 1 - page_index);

	if (!PageUptodate(page)) {
		btrfs_readpage(NULL, page);
		lock_page(page);
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto release_page;
		}
	}

	page_start = page_offset(page);
	page_end = page_start + PAGE_SIZE - 1;
	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);

	if (*cluster_nr < cluster->nr &&
	    page_start + offset == cluster->boundary[*cluster_nr]) {
		set_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
				EXTENT_BOUNDARY);
		(*cluster_nr)++;
	}

	ret = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start, page_end,
					0, NULL);
	if (ret) {
		clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start,
				  page_end, EXTENT_LOCKED | EXTENT_BOUNDARY);
		goto release_page;
	}
	set_page_dirty(page);

	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
	unlock_page(page);
	put_page(page);

	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	balance_dirty_pages_ratelimited(inode->i_mapping);
	btrfs_throttle(fs_info);
	if (btrfs_should_cancel_balance(fs_info))
		ret = -ECANCELED;
	return ret;

release_page:
	unlock_page(page);
	put_page(page);
release_delalloc:
	btrfs_delalloc_release_metadata(BTRFS_I(inode), PAGE_SIZE, true);
	btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
	return ret;
}

static int relocate_file_extent_cluster(struct inode *inode,
					struct file_extent_cluster *cluster)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 page_start;
	u64 page_end;
	u64 offset = BTRFS_I(inode)->index_cnt;
	unsigned long index;
	unsigned long last_index;
	struct page *page;
	struct file_ra_state *ra;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int nr = 0;
	int cluster_nr = 0;
	int ret = 0;

	if (!cluster->nr)
@@ -2919,109 +3002,14 @@ static int relocate_file_extent_cluster(struct inode *inode,
	if (ret)
		goto out;

	index = (cluster->start - offset) >> PAGE_SHIFT;
	last_index = (cluster->end - offset) >> PAGE_SHIFT;
	while (index <= last_index) {
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      PAGE_SIZE);
		if (ret)
			goto out;

		page = find_lock_page(inode->i_mapping, index);
		if (!page) {
			page_cache_sync_readahead(inode->i_mapping,
						  ra, NULL, index,
						  last_index + 1 - index);
			page = find_or_create_page(inode->i_mapping, index,
						   mask);
			if (!page) {
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
				btrfs_delalloc_release_extents(BTRFS_I(inode),
							PAGE_SIZE);
				ret = -ENOMEM;
				goto out;
			}
		}
		ret = set_page_extent_mapped(page);
		if (ret < 0) {
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
			btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
			unlock_page(page);
			put_page(page);
			goto out;
		}

		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping,
						   ra, NULL, page, index,
						   last_index + 1 - index);
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
				btrfs_delalloc_release_extents(BTRFS_I(inode),
							PAGE_SIZE);
				ret = -EIO;
				goto out;
			}
		}

		page_start = page_offset(page);
		page_end = page_start + PAGE_SIZE - 1;
		lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);

		if (nr < cluster->nr &&
		    page_start + offset == cluster->boundary[nr]) {
			set_extent_bits(&BTRFS_I(inode)->io_tree,
					page_start, page_end,
					EXTENT_BOUNDARY);
			nr++;
		}

		ret = btrfs_set_extent_delalloc(BTRFS_I(inode), page_start,
						page_end, 0, NULL);
		if (ret) {
			unlock_page(page);
			put_page(page);
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							PAGE_SIZE, true);
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       PAGE_SIZE);
			clear_extent_bits(&BTRFS_I(inode)->io_tree,
					  page_start, page_end,
					  EXTENT_LOCKED | EXTENT_BOUNDARY);
			goto out;
		}
		set_page_dirty(page);

		unlock_extent(&BTRFS_I(inode)->io_tree,
			      page_start, page_end);
		unlock_page(page);
		put_page(page);
		index++;

		btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
		balance_dirty_pages_ratelimited(inode->i_mapping);
		btrfs_throttle(fs_info);
		if (btrfs_should_cancel_balance(fs_info)) {
			ret = -ECANCELED;
			goto out;
		}
	}
	WARN_ON(nr != cluster->nr);
	for (index = (cluster->start - offset) >> PAGE_SHIFT;
	     index <= last_index && !ret; index++)
		ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
	if (btrfs_is_zoned(fs_info) && !ret)
		ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret == 0)
		WARN_ON(cluster_nr != cluster->nr);
out:
	kfree(ra);
	return ret;