btrfs: uninline some static inline helpers from tree-log.h

The helpers do initialization or release work, none of which is
performance critical enough to require a static inline, so move them
to the .c file.

Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
David Sterba 2024-02-16 14:03:08 +01:00
parent 2be1f2bf23
commit c207adc147
2 changed files with 49 additions and 45 deletions

View File

@ -2818,6 +2818,52 @@ static void wait_for_writer(struct btrfs_root *root)
finish_wait(&root->log_writer_wait, &wait);
}
/*
 * Prepare a log context for use: attach the inode being logged, clear all
 * result/state fields and set up the empty tracking lists.
 */
void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx, struct inode *inode)
{
	ctx->inode = inode;

	/* No log result or transid recorded yet. */
	ctx->log_ret = 0;
	ctx->log_transid = 0;

	/* Reset all per-logging-attempt state flags. */
	ctx->log_new_dentries = false;
	ctx->logging_new_name = false;
	ctx->logging_new_delayed_dentries = false;
	ctx->logged_before = false;
	ctx->logging_conflict_inodes = false;
	ctx->num_conflict_inodes = 0;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->ordered_extents);
	INIT_LIST_HEAD(&ctx->conflict_inodes);

	/* Allocated lazily by btrfs_init_log_ctx_scratch_eb() if needed. */
	ctx->scratch_eb = NULL;
}
void btrfs_init_log_ctx_scratch_eb(struct btrfs_log_ctx *ctx)
{
struct btrfs_inode *inode = BTRFS_I(ctx->inode);
if (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
!test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
return;
/*
* Don't care about allocation failure. This is just for optimization,
* if we fail to allocate here, we will try again later if needed.
*/
ctx->scratch_eb = alloc_dummy_extent_buffer(inode->root->fs_info, 0);
}
/*
 * Drop all ordered extent references tracked by the log context, emptying
 * its ordered_extents list. The caller must hold the inode lock, as
 * asserted below.
 */
void btrfs_release_log_ctx_extents(struct btrfs_log_ctx *ctx)
{
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *next;

	ASSERT(inode_is_locked(ctx->inode));

	list_for_each_entry_safe(entry, next, &ctx->ordered_extents, log_list) {
		list_del_init(&entry->log_list);
		btrfs_put_ordered_extent(entry);
	}
}
static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
struct btrfs_log_ctx *ctx)
{

View File

@ -55,51 +55,9 @@ struct btrfs_log_ctx {
struct extent_buffer *scratch_eb;
};
/*
 * Initialize a log context: attach the inode being logged, zero out all
 * result/state fields and set up the empty tracking lists.
 */
static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx,
				      struct inode *inode)
{
	ctx->inode = inode;

	/* No log result or transid recorded yet. */
	ctx->log_ret = 0;
	ctx->log_transid = 0;

	/* Reset all per-logging-attempt state flags. */
	ctx->log_new_dentries = false;
	ctx->logging_new_name = false;
	ctx->logging_new_delayed_dentries = false;
	ctx->logged_before = false;
	ctx->logging_conflict_inodes = false;
	ctx->num_conflict_inodes = 0;

	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->ordered_extents);
	INIT_LIST_HEAD(&ctx->conflict_inodes);

	/* Allocated lazily by btrfs_init_log_ctx_scratch_eb() if needed. */
	ctx->scratch_eb = NULL;
}
static inline void btrfs_init_log_ctx_scratch_eb(struct btrfs_log_ctx *ctx)
{
struct btrfs_inode *inode = BTRFS_I(ctx->inode);
if (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
!test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
return;
/*
* Don't care about allocation failure. This is just for optimization,
* if we fail to allocate here, we will try again later if needed.
*/
ctx->scratch_eb = alloc_dummy_extent_buffer(inode->root->fs_info, 0);
}
/*
 * Drop all ordered extent references tracked by the log context, emptying
 * its ordered_extents list. The caller must hold the inode lock, as
 * asserted below.
 */
static inline void btrfs_release_log_ctx_extents(struct btrfs_log_ctx *ctx)
{
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *next;

	ASSERT(inode_is_locked(ctx->inode));

	list_for_each_entry_safe(entry, next, &ctx->ordered_extents, log_list) {
		list_del_init(&entry->log_list);
		btrfs_put_ordered_extent(entry);
	}
}
void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx, struct inode *inode);
void btrfs_init_log_ctx_scratch_eb(struct btrfs_log_ctx *ctx);
void btrfs_release_log_ctx_extents(struct btrfs_log_ctx *ctx);
static inline void btrfs_set_log_full_commit(struct btrfs_trans_handle *trans)
{