btrfs: free more things in btrfs_free_fs_info

Things like the percpu_counters, the mapping_tree, and the csum hash can
all be freed at btrfs_free_fs_info time, since the helpers all check if
the structure has been initialized already.  This significantly cleans
up the error cases in open_ctree.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 141386e1a5 (parent bc44d7c4b2)
Author:    Josef Bacik <josef@toxicpanda.com>
Date:      2020-01-24 09:32:57 -05:00
Committer: David Sterba <dsterba@suse.com>
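
The reasoning above depends on every helper called from btrfs_free_fs_info() being safe on a partially initialized fs_info: percpu_counter_destroy() simply returns if the counter was never set up, and the moved btrfs_free_csum_hash() now checks csum_shash for NULL before calling crypto_free_shash(). Below is a minimal userspace C sketch of that pattern; it is not btrfs code, and the names (demo_fs_info, demo_open, demo_free_fs_info) are invented for illustration only.

/*
 * Minimal userspace sketch of the "idempotent teardown" pattern; all names
 * are hypothetical and none of this is kernel or btrfs code.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_fs_info {
	long *counter;		/* stands in for a percpu_counter */
	char *csum_buf;		/* stands in for the csum hash */
};

/* Safe to call even if the counter was never initialized. */
static void demo_free_counter(struct demo_fs_info *fs_info)
{
	if (!fs_info->counter)
		return;
	free(fs_info->counter);
	fs_info->counter = NULL;
}

/* Safe to call even if the checksum buffer was never allocated. */
static void demo_free_csum(struct demo_fs_info *fs_info)
{
	if (fs_info->csum_buf)
		free(fs_info->csum_buf);
	fs_info->csum_buf = NULL;
}

/* One teardown routine covers every partially-initialized state. */
static void demo_free_fs_info(struct demo_fs_info *fs_info)
{
	demo_free_counter(fs_info);
	demo_free_csum(fs_info);
	free(fs_info);
}

static struct demo_fs_info *demo_open(void)
{
	struct demo_fs_info *fs_info = calloc(1, sizeof(*fs_info));

	if (!fs_info)
		return NULL;

	fs_info->counter = calloc(1, sizeof(*fs_info->counter));
	if (!fs_info->counter)
		goto fail;		/* single label, like fail_srcu */

	fs_info->csum_buf = calloc(1, 32);
	if (!fs_info->csum_buf)
		goto fail;		/* same label works after every step */

	return fs_info;
fail:
	demo_free_fs_info(fs_info);
	return NULL;
}

int main(void)
{
	struct demo_fs_info *fs_info = demo_open();

	printf("demo_open %s\n", fs_info ? "succeeded" : "failed");
	if (fs_info)
		demo_free_fs_info(fs_info);
	return 0;
}

Because each free helper is a no-op for anything that was never initialized, demo_open() needs only one error label, which mirrors how the open_ctree error paths collapse onto fail_srcu and fail_alloc in this patch.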

@@ -98,6 +98,12 @@ void __cold btrfs_end_io_wq_exit(void)
 	kmem_cache_destroy(btrfs_end_io_wq_cache);
 }
 
+static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
+{
+	if (fs_info->csum_shash)
+		crypto_free_shash(fs_info->csum_shash);
+}
+
 /*
  * async submit bios are used to offload expensive checksumming
  * onto the worker threads. They checksum file and metadata bios
@@ -1527,6 +1533,13 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
 
 void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
 {
+	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
+	percpu_counter_destroy(&fs_info->delalloc_bytes);
+	percpu_counter_destroy(&fs_info->dio_bytes);
+	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
+	btrfs_free_csum_hash(fs_info);
+	btrfs_free_stripe_hash_table(fs_info);
+	btrfs_free_ref_cache(fs_info);
 	kfree(fs_info->balance_ctl);
 	kfree(fs_info->delayed_root);
 	btrfs_put_fs_root(fs_info->extent_root);
@@ -2207,11 +2220,6 @@ static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
 	return 0;
 }
 
-static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
-{
-	crypto_free_shash(fs_info->csum_shash);
-}
-
 static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
 			    struct btrfs_fs_devices *fs_devices)
 {
@@ -2688,7 +2696,7 @@ int __cold open_ctree(struct super_block *sb,
 	ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
 	if (ret) {
 		err = ret;
-		goto fail_dio_bytes;
+		goto fail_srcu;
 	}
 	fs_info->dirty_metadata_batch = PAGE_SIZE *
 					(1 + ilog2(nr_cpu_ids));
@@ -2696,14 +2704,14 @@ int __cold open_ctree(struct super_block *sb,
 	ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
 	if (ret) {
 		err = ret;
-		goto fail_dirty_metadata_bytes;
+		goto fail_srcu;
 	}
 
 	ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
 				  GFP_KERNEL);
 	if (ret) {
 		err = ret;
-		goto fail_delalloc_bytes;
+		goto fail_srcu;
 	}
 
 	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
@@ -2770,7 +2778,7 @@ int __cold open_ctree(struct super_block *sb,
 	fs_info->btree_inode = new_inode(sb);
 	if (!fs_info->btree_inode) {
 		err = -ENOMEM;
-		goto fail_bio_counter;
+		goto fail_srcu;
 	}
 
 	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
@@ -2883,7 +2891,7 @@ int __cold open_ctree(struct super_block *sb,
 		btrfs_err(fs_info, "superblock checksum mismatch");
 		err = -EINVAL;
 		brelse(bh);
-		goto fail_csum;
+		goto fail_alloc;
 	}
 
 	/*
@@ -2920,11 +2928,11 @@ int __cold open_ctree(struct super_block *sb,
 	if (ret) {
 		btrfs_err(fs_info, "superblock contains fatal errors");
 		err = -EINVAL;
-		goto fail_csum;
+		goto fail_alloc;
 	}
 
 	if (!btrfs_super_root(disk_super))
-		goto fail_csum;
+		goto fail_alloc;
 
 	/* check FS state, whether FS is broken. */
 	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
@@ -2939,7 +2947,7 @@ int __cold open_ctree(struct super_block *sb,
 	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
 	if (ret) {
 		err = ret;
-		goto fail_csum;
+		goto fail_alloc;
 	}
 
 	features = btrfs_super_incompat_flags(disk_super) &
@@ -2949,7 +2957,7 @@ int __cold open_ctree(struct super_block *sb,
 		    "cannot mount because of unsupported optional features (%llx)",
 		    features);
 		err = -EINVAL;
-		goto fail_csum;
+		goto fail_alloc;
 	}
 
 	features = btrfs_super_incompat_flags(disk_super);
@@ -2993,7 +3001,7 @@ int __cold open_ctree(struct super_block *sb,
 		btrfs_err(fs_info,
 "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
 			  nodesize, sectorsize);
-		goto fail_csum;
+		goto fail_alloc;
 	}
 
 	/*
@@ -3009,7 +3017,7 @@ int __cold open_ctree(struct super_block *sb,
 	"cannot mount read-write because of unsupported optional features (%llx)",
 		       features);
 		err = -EINVAL;
-		goto fail_csum;
+		goto fail_alloc;
 	}
 
 	ret = btrfs_init_workqueues(fs_info, fs_devices);
@@ -3346,25 +3354,14 @@ int __cold open_ctree(struct super_block *sb,
 fail_sb_buffer:
 	btrfs_stop_all_workers(fs_info);
 	btrfs_free_block_groups(fs_info);
-fail_csum:
-	btrfs_free_csum_hash(fs_info);
 fail_alloc:
 fail_iput:
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
 
 	iput(fs_info->btree_inode);
-fail_bio_counter:
-	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
-fail_delalloc_bytes:
-	percpu_counter_destroy(&fs_info->delalloc_bytes);
-fail_dirty_metadata_bytes:
-	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
-fail_dio_bytes:
-	percpu_counter_destroy(&fs_info->dio_bytes);
 fail_srcu:
 	cleanup_srcu_struct(&fs_info->subvol_srcu);
 fail:
-	btrfs_free_stripe_hash_table(fs_info);
 	btrfs_close_devices(fs_info->fs_devices);
 	return err;
 }
@@ -4071,16 +4068,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
 	btrfs_close_devices(fs_info->fs_devices);
 
-	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
-	percpu_counter_destroy(&fs_info->delalloc_bytes);
-	percpu_counter_destroy(&fs_info->dio_bytes);
-	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
-
 	cleanup_srcu_struct(&fs_info->subvol_srcu);
-
-	btrfs_free_csum_hash(fs_info);
-	btrfs_free_stripe_hash_table(fs_info);
-	btrfs_free_ref_cache(fs_info);
 }
 
 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,