mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-02 15:18:19 +00:00
btrfs: breakout empty head cleanup to a helper
Move this code out to a helper function to further simplify __btrfs_run_delayed_refs. Signed-off-by: Josef Bacik <jbacik@fb.com> Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent
b00e62507e
commit
194ab0bc21
1 changed file with 45 additions and 34 deletions
|
@ -2605,6 +2605,43 @@ static int cleanup_extent_op(struct btrfs_trans_handle *trans,
|
||||||
return ret ? ret : 1;
|
return ret ? ret : 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int cleanup_ref_head(struct btrfs_trans_handle *trans,
|
||||||
|
struct btrfs_fs_info *fs_info,
|
||||||
|
struct btrfs_delayed_ref_head *head)
|
||||||
|
{
|
||||||
|
struct btrfs_delayed_ref_root *delayed_refs;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
delayed_refs = &trans->transaction->delayed_refs;
|
||||||
|
|
||||||
|
ret = cleanup_extent_op(trans, fs_info, head);
|
||||||
|
if (ret < 0) {
|
||||||
|
unselect_delayed_ref_head(delayed_refs, head);
|
||||||
|
btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
|
||||||
|
return ret;
|
||||||
|
} else if (ret) {
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Need to drop our head ref lock and re-acquire the delayed ref lock
|
||||||
|
* and then re-check to make sure nobody got added.
|
||||||
|
*/
|
||||||
|
spin_unlock(&head->lock);
|
||||||
|
spin_lock(&delayed_refs->lock);
|
||||||
|
spin_lock(&head->lock);
|
||||||
|
if (!list_empty(&head->ref_list) || head->extent_op) {
|
||||||
|
spin_unlock(&head->lock);
|
||||||
|
spin_unlock(&delayed_refs->lock);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
head->node.in_tree = 0;
|
||||||
|
delayed_refs->num_heads--;
|
||||||
|
rb_erase(&head->href_node, &delayed_refs->href_root);
|
||||||
|
spin_unlock(&delayed_refs->lock);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Returns 0 on success or if called with an already aborted transaction.
|
* Returns 0 on success or if called with an already aborted transaction.
|
||||||
* Returns -ENOMEM or -EIO on failure and will abort the transaction.
|
* Returns -ENOMEM or -EIO on failure and will abort the transaction.
|
||||||
|
@ -2686,47 +2723,21 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!ref) {
|
if (!ref) {
|
||||||
|
ret = cleanup_ref_head(trans, fs_info, locked_ref);
|
||||||
|
if (ret > 0 ) {
|
||||||
/* All delayed refs have been processed, Go ahead
|
|
||||||
* and send the head node to run_one_delayed_ref,
|
|
||||||
* so that any accounting fixes can happen
|
|
||||||
*/
|
|
||||||
ref = &locked_ref->node;
|
|
||||||
|
|
||||||
ret = cleanup_extent_op(trans, fs_info, locked_ref);
|
|
||||||
if (ret < 0) {
|
|
||||||
unselect_delayed_ref_head(delayed_refs,
|
|
||||||
locked_ref);
|
|
||||||
btrfs_debug(fs_info,
|
|
||||||
"run_delayed_extent_op returned %d",
|
|
||||||
ret);
|
|
||||||
return ret;
|
|
||||||
} else if (ret > 0) {
|
|
||||||
/* We dropped our lock, we need to loop. */
|
/* We dropped our lock, we need to loop. */
|
||||||
ret = 0;
|
ret = 0;
|
||||||
continue;
|
continue;
|
||||||
|
} else if (ret) {
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Need to drop our head ref lock and re-acquire the
|
* All delayed refs have been processed, Go ahead and
|
||||||
* delayed ref lock and then re-check to make sure
|
* send the head node to run_one_delayed_ref, so that
|
||||||
* nobody got added.
|
* any accounting fixes can happen
|
||||||
*/
|
*/
|
||||||
spin_unlock(&locked_ref->lock);
|
ref = &locked_ref->node;
|
||||||
spin_lock(&delayed_refs->lock);
|
|
||||||
spin_lock(&locked_ref->lock);
|
|
||||||
if (!list_empty(&locked_ref->ref_list) ||
|
|
||||||
locked_ref->extent_op) {
|
|
||||||
spin_unlock(&locked_ref->lock);
|
|
||||||
spin_unlock(&delayed_refs->lock);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
ref->in_tree = 0;
|
|
||||||
delayed_refs->num_heads--;
|
|
||||||
rb_erase(&locked_ref->href_node,
|
|
||||||
&delayed_refs->href_root);
|
|
||||||
spin_unlock(&delayed_refs->lock);
|
|
||||||
} else {
|
} else {
|
||||||
actual_count++;
|
actual_count++;
|
||||||
ref->in_tree = 0;
|
ref->in_tree = 0;
|
||||||
|
|
Loading…
Reference in a new issue