btrfs: add a cached_state to try_lock_extent

With nowait becoming more pervasive throughout our codebase, go ahead and
add a cached_state to try_lock_extent().  This allows us to clear the
locked area faster if we hit contention, and gives us the same cached
state optimization for the unlock if we do manage to lock the range.
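
To illustrate how a caller benefits, here is a minimal sketch of the nowait
pattern this enables.  The helper names nowait_example() and do_nowait_work()
are made up for the example, and it assumes the unlock_extent() variant that
takes a cached_state pointer, matching the lock_extent() calls elsewhere in
this patch:

/*
 * Sketch only: try_lock_extent() now fills @cached_state on success so
 * the later unlock_extent() can skip the io tree search, and on
 * contention it reuses the same cached state while clearing the
 * partially locked range before returning failure.
 */
static int nowait_example(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct extent_state *cached_state = NULL;
	int ret;

	if (!try_lock_extent(&inode->io_tree, start, end, &cached_state))
		return -EAGAIN;

	ret = do_nowait_work(inode, start, end);	/* hypothetical helper */

	/* The cached state is consumed here, avoiding a second lookup. */
	unlock_extent(&inode->io_tree, start, end, &cached_state);
	return ret;
}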

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 83ae4133ac (parent 76dcd734ec)
Author:    Josef Bacik, 2022-09-30 16:45:09 -04:00
Committer: David Sterba
7 changed files with 14 additions and 9 deletions

diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c
--- a/fs/btrfs/extent-io-tree.c
+++ b/fs/btrfs/extent-io-tree.c

@@ -1615,17 +1615,18 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 				  changeset);
 }
 
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+		    struct extent_state **cached)
 {
 	int err;
 	u64 failed_start;
 
 	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start,
-			       NULL, NULL, GFP_NOFS);
+			       cached, NULL, GFP_NOFS);
 	if (err == -EEXIST) {
 		if (failed_start > start)
 			clear_extent_bit(tree, start, failed_start - 1,
-					 EXTENT_LOCKED, NULL);
+					 EXTENT_LOCKED, cached);
 		return 0;
 	}
 	return 1;

diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
--- a/fs/btrfs/extent-io-tree.h
+++ b/fs/btrfs/extent-io-tree.h

@@ -106,7 +106,8 @@ void extent_io_tree_release(struct extent_io_tree *tree);
 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
 		struct extent_state **cached);
 
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
+		    struct extent_state **cached);
 
 int __init extent_state_init_cachep(void);
 void __cold extent_state_free_cachep(void);

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c

@@ -4959,7 +4959,8 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
 	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
 
 	if (wait == WAIT_NONE) {
-		if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1))
+		if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1,
+				     NULL))
 			return -EAGAIN;
 	} else {
 		ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1, NULL);

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c

@@ -1302,7 +1302,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 		struct btrfs_ordered_extent *ordered;
 
 		if (nowait) {
-			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos)) {
+			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
+					     cached_state)) {
 				for (i = 0; i < num_pages; i++) {
 					unlock_page(pages[i]);
 					put_page(pages[i]);

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c

@@ -7255,7 +7255,8 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
 
 	while (1) {
 		if (nowait) {
-			if (!try_lock_extent(io_tree, lockstart, lockend))
+			if (!try_lock_extent(io_tree, lockstart, lockend,
+					     cached_state))
 				return -EAGAIN;
 		} else {
 			lock_extent(io_tree, lockstart, lockend, cached_state);

diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c

@@ -1073,7 +1073,7 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end)
 {
 	struct btrfs_ordered_extent *ordered;
 
-	if (!try_lock_extent(&inode->io_tree, start, end))
+	if (!try_lock_extent(&inode->io_tree, start, end, NULL))
 		return false;
 
 	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);

diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c

@@ -1120,7 +1120,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
 				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
 				end--;
 				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
-						      key.offset, end);
+						      key.offset, end, NULL);
 				if (!ret)
 					continue;
 