btrfs: move variables for clustered allocation into find_free_extent_ctl

Move "last_ptr" and "use_cluster" into struct find_free_extent_ctl, so
that hook functions for clustered allocator can use these variables.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Naohiro Aota 2020-02-25 12:56:18 +09:00 committed by David Sterba
parent ea544149a4
commit c10859be9b

View file

@@ -3447,6 +3447,8 @@ struct find_free_extent_ctl {
/* For clustered allocation */
u64 empty_cluster;
struct btrfs_free_cluster *last_ptr;
bool use_cluster;
bool have_caching_bg;
bool orig_have_caching_bg;
@@ -3805,11 +3807,9 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
{
int ret = 0;
int cache_block_group_error = 0;
struct btrfs_free_cluster *last_ptr = NULL;
struct btrfs_block_group *block_group = NULL;
struct find_free_extent_ctl ffe_ctl = {0};
struct btrfs_space_info *space_info;
bool use_cluster = true;
bool full_search = false;
WARN_ON(num_bytes < fs_info->sectorsize);
@@ -3818,8 +3818,6 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
ffe_ctl.empty_size = empty_size;
ffe_ctl.flags = flags;
ffe_ctl.search_start = 0;
ffe_ctl.retry_clustered = false;
ffe_ctl.retry_unclustered = false;
ffe_ctl.delalloc = delalloc;
ffe_ctl.index = btrfs_bg_flags_to_raid_index(flags);
ffe_ctl.have_caching_bg = false;
@@ -3828,6 +3826,12 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
ffe_ctl.hint_byte = hint_byte_orig;
ffe_ctl.policy = BTRFS_EXTENT_ALLOC_CLUSTERED;
/* For clustered allocation */
ffe_ctl.retry_clustered = false;
ffe_ctl.retry_unclustered = false;
ffe_ctl.last_ptr = NULL;
ffe_ctl.use_cluster = true;
ins->type = BTRFS_EXTENT_ITEM_KEY;
ins->objectid = 0;
ins->offset = 0;
@@ -3858,14 +3862,16 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
spin_unlock(&space_info->lock);
return -ENOSPC;
} else if (space_info->max_extent_size) {
use_cluster = false;
ffe_ctl.use_cluster = false;
}
spin_unlock(&space_info->lock);
}
last_ptr = fetch_cluster_info(fs_info, space_info,
&ffe_ctl.empty_cluster);
if (last_ptr) {
ffe_ctl.last_ptr = fetch_cluster_info(fs_info, space_info,
&ffe_ctl.empty_cluster);
if (ffe_ctl.last_ptr) {
struct btrfs_free_cluster *last_ptr = ffe_ctl.last_ptr;
spin_lock(&last_ptr->lock);
if (last_ptr->block_group)
ffe_ctl.hint_byte = last_ptr->window_start;
@@ -3876,7 +3882,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
* some time.
*/
ffe_ctl.hint_byte = last_ptr->window_start;
use_cluster = false;
ffe_ctl.use_cluster = false;
}
spin_unlock(&last_ptr->lock);
}
@@ -3989,10 +3995,11 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
* Ok we want to try and use the cluster allocator, so
* lets look there
*/
if (last_ptr && use_cluster) {
if (ffe_ctl.last_ptr && ffe_ctl.use_cluster) {
struct btrfs_block_group *cluster_bg = NULL;
ret = find_free_extent_clustered(block_group, last_ptr,
ret = find_free_extent_clustered(block_group,
ffe_ctl.last_ptr,
&ffe_ctl, &cluster_bg);
if (ret == 0) {
@@ -4010,8 +4017,8 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
/* ret == -ENOENT case falls through */
}
ret = find_free_extent_unclustered(block_group, last_ptr,
&ffe_ctl);
ret = find_free_extent_unclustered(block_group,
ffe_ctl.last_ptr, &ffe_ctl);
if (ret == -EAGAIN)
goto have_block_group;
else if (ret > 0)
@@ -4060,8 +4067,9 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
}
up_read(&space_info->groups_sem);
ret = find_free_extent_update_loop(fs_info, last_ptr, ins, &ffe_ctl,
full_search, use_cluster);
ret = find_free_extent_update_loop(fs_info, ffe_ctl.last_ptr, ins,
&ffe_ctl, full_search,
ffe_ctl.use_cluster);
if (ret > 0)
goto search;