btrfs: pass btrfs_inode to btrfs_inode_unlock

The function is for internal interfaces, so we should use the
btrfs_inode.
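
As a minimal sketch of the calling convention change (the example_unlock_*
wrappers below are hypothetical and only illustrate the pattern; the
prototypes and BTRFS_I() are taken from the diff): VFS-facing entry points
that still hold a struct inode now convert with BTRFS_I() at the call site,
while internal code that already has a struct btrfs_inode passes it directly.

/* Before: the helper took the VFS inode and converted internally. */
void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags);

/* Hypothetical caller holding the VFS inode: */
static void example_unlock_old(struct inode *inode)
{
	btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
}

/* After: the helper takes the btrfs_inode; VFS-facing callers convert. */
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags);

static void example_unlock_new(struct inode *inode)
{
	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
}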

Reviewed-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: David Sterba <dsterba@suse.com>
David Sterba 2022-10-27 02:41:32 +02:00
parent 29b6352b14
commit e5d4d75bd3
8 changed files with 36 additions and 36 deletions


@@ -546,7 +546,7 @@ enum btrfs_ilock_type {
 };
 int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags);
-void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags);
+void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags);
 void btrfs_update_inode_bytes(struct btrfs_inode *inode, const u64 add_bytes,
 const u64 del_bytes);
 void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end);


@@ -1298,11 +1298,11 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 btrfs_inode_lock(BTRFS_I(inode), 0);
 if (IS_SWAPFILE(inode)) {
 ret = -ETXTBSY;
-btrfs_inode_unlock(inode, 0);
+btrfs_inode_unlock(BTRFS_I(inode), 0);
 break;
 }
 if (!(inode->i_sb->s_flags & SB_ACTIVE)) {
-btrfs_inode_unlock(inode, 0);
+btrfs_inode_unlock(BTRFS_I(inode), 0);
 break;
 }
 if (do_compress)
@@ -1315,7 +1315,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 if (sectors_defragged > prev_sectors_defragged)
 balance_dirty_pages_ratelimited(inode->i_mapping);
-btrfs_inode_unlock(inode, 0);
+btrfs_inode_unlock(BTRFS_I(inode), 0);
 if (ret < 0)
 break;
 cur = max(cluster_end + 1, last_scanned);
@@ -1353,7 +1353,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
 if (do_compress) {
 btrfs_inode_lock(BTRFS_I(inode), 0);
 BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
-btrfs_inode_unlock(inode, 0);
+btrfs_inode_unlock(BTRFS_I(inode), 0);
 }
 return ret;
 }


@@ -1646,7 +1646,7 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
 * We can only do one readdir with delayed items at a time because of
 * item->readdir_list.
 */
-btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
 btrfs_inode_lock(BTRFS_I(inode), 0);
 mutex_lock(&delayed_node->mutex);


@@ -1428,7 +1428,7 @@ again:
 iocb->ki_pos += num_written;
 }
 out:
-btrfs_inode_unlock(inode, ilock_flags);
+btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
 return num_written ? num_written : ret;
 }
@@ -1474,13 +1474,13 @@ relock:
 err = generic_write_checks(iocb, from);
 if (err <= 0) {
-btrfs_inode_unlock(inode, ilock_flags);
+btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
 return err;
 }
 err = btrfs_write_check(iocb, from, err);
 if (err < 0) {
-btrfs_inode_unlock(inode, ilock_flags);
+btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
 goto out;
 }
@@ -1491,13 +1491,13 @@ relock:
 */
 if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
 pos + iov_iter_count(from) > i_size_read(inode)) {
-btrfs_inode_unlock(inode, ilock_flags);
+btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
 ilock_flags &= ~BTRFS_ILOCK_SHARED;
 goto relock;
 }
 if (check_direct_IO(fs_info, from, pos)) {
-btrfs_inode_unlock(inode, ilock_flags);
+btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
 goto buffered;
 }
@@ -1528,7 +1528,7 @@ relock:
 * iocb, and that needs to lock the inode. So unlock it before calling
 * iomap_dio_complete() to avoid a deadlock.
 */
-btrfs_inode_unlock(inode, ilock_flags);
+btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
 if (IS_ERR_OR_NULL(dio))
 err = PTR_ERR_OR_ZERO(dio);
@@ -1635,7 +1635,7 @@ static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
 ret = btrfs_do_encoded_write(iocb, from, encoded);
 out:
-btrfs_inode_unlock(inode, 0);
+btrfs_inode_unlock(BTRFS_I(inode), 0);
 return ret;
 }
@@ -1830,7 +1830,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 */
 ret = start_ordered_ops(inode, start, end);
 if (ret) {
-btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
 goto out;
 }
@@ -1933,7 +1933,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 * file again, but that will end up using the synchronization
 * inside btrfs_sync_log to keep things safe.
 */
-btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
 if (ret == BTRFS_NO_LOG_SYNC) {
 ret = btrfs_end_transaction(trans);
@@ -2001,7 +2001,7 @@ out:
 out_release_extents:
 btrfs_release_log_ctx_extents(&ctx);
-btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
 goto out;
 }
@@ -2644,7 +2644,7 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
 truncated_block = true;
 ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
 if (ret) {
-btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
 return ret;
 }
 }
@@ -2743,7 +2743,7 @@ out_only_mutex:
 ret = ret2;
 }
 }
-btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
 return ret;
 }
@@ -3104,7 +3104,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 if (mode & FALLOC_FL_ZERO_RANGE) {
 ret = btrfs_zero_range(inode, offset, len, mode);
-btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
 return ret;
 }
@@ -3202,7 +3202,7 @@ out_unlock:
 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
 &cached_state);
 out:
-btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
 extent_changeset_free(data_reserved);
 return ret;
 }
@@ -3693,7 +3693,7 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
 case SEEK_HOLE:
 btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
 offset = find_desired_extent(BTRFS_I(inode), offset, whence);
-btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
 break;
 }
@@ -3797,7 +3797,7 @@ again:
 goto again;
 }
 }
-btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
 return ret < 0 ? ret : read;
 }


@@ -202,14 +202,14 @@ int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
-void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags)
+void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
 {
 if (ilock_flags & BTRFS_ILOCK_MMAP)
-up_write(&BTRFS_I(inode)->i_mmap_lock);
+up_write(&inode->i_mmap_lock);
 if (ilock_flags & BTRFS_ILOCK_SHARED)
-inode_unlock_shared(inode);
+inode_unlock_shared(&inode->vfs_inode);
 else
-inode_unlock(inode);
+inode_unlock(&inode->vfs_inode);
 }
 /*
@@ -10277,7 +10277,7 @@ static ssize_t btrfs_encoded_read_inline(
 read_extent_buffer(leaf, tmp, ptr, count);
 btrfs_release_path(path);
 unlock_extent(io_tree, start, lockend, cached_state);
-btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
+btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 *unlocked = true;
 ret = copy_to_iter(tmp, count, iter);
@@ -10480,7 +10480,7 @@ static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
 goto out;
 unlock_extent(io_tree, start, lockend, cached_state);
-btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
+btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 *unlocked = true;
 if (compressed) {
@@ -10532,7 +10532,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
 btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
 if (iocb->ki_pos >= inode->vfs_inode.i_size) {
-btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
+btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 return 0;
 }
 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
@@ -10630,7 +10630,7 @@ ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
 if (disk_bytenr == EXTENT_MAP_HOLE) {
 unlock_extent(io_tree, start, lockend, &cached_state);
-btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
+btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 unlocked = true;
 ret = iov_iter_zero(count, iter);
 if (ret != count)
@@ -10653,7 +10653,7 @@ out_unlock_extent:
 unlock_extent(io_tree, start, lockend, &cached_state);
 out_unlock_inode:
 if (!unlocked)
-btrfs_inode_unlock(&inode->vfs_inode, BTRFS_ILOCK_SHARED);
+btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 return ret;
 }


@@ -1002,7 +1002,7 @@ out_up_read:
 out_dput:
 dput(dentry);
 out_unlock:
-btrfs_inode_unlock(dir, 0);
+btrfs_inode_unlock(BTRFS_I(dir), 0);
 return error;
 }
@@ -2530,14 +2530,14 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 btrfs_inode_lock(BTRFS_I(inode), 0);
 err = btrfs_delete_subvolume(dir, dentry);
-btrfs_inode_unlock(inode, 0);
+btrfs_inode_unlock(BTRFS_I(inode), 0);
 if (!err)
 d_delete_notify(dir, dentry);
 out_dput:
 dput(dentry);
 out_unlock_dir:
-btrfs_inode_unlock(dir, 0);
+btrfs_inode_unlock(BTRFS_I(dir), 0);
 free_subvol_name:
 kfree(subvol_name_ptr);
 free_parent:


@@ -911,7 +911,7 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
 out_unlock:
 if (same_inode) {
-btrfs_inode_unlock(src_inode, BTRFS_ILOCK_MMAP);
+btrfs_inode_unlock(BTRFS_I(src_inode), BTRFS_ILOCK_MMAP);
 } else {
 btrfs_double_mmap_unlock(src_inode, dst_inode);
 unlock_two_nondirectories(src_inode, dst_inode);


@@ -2894,7 +2894,7 @@ static noinline_for_stack int prealloc_file_extent_cluster(
 if (ret)
 break;
 }
-btrfs_inode_unlock(&inode->vfs_inode, 0);
+btrfs_inode_unlock(inode, 0);
 if (cur_offset < prealloc_end)
 btrfs_free_reserved_data_space_noquota(inode->root->fs_info,