btrfs: introduce submit_compressed_bio() for compression

The new helper, submit_compressed_bio(), will aggregate the following
work:

- Increase compressed_bio::pending_bios
- Remap the endio function
- Map and submit the bio

This slightly reorders the calls to btrfs_csum_one_bio() or
btrfs_lookup_bio_sums(), but neither of them does anything regarding IO
submission, so this is effectively no functional change. We mainly care
about the order of the following steps (see the sketch after the list):

- atomic_inc
- btrfs_bio_wq_end_io
- btrfs_map_bio
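
Why that order matters, as a minimal userspace sketch: the endio handlers
are what decrement compressed_bio::pending_bios, so raising the counter
before the bio is mapped and submitted ensures an immediately-firing
completion cannot drop it to zero and finish the compressed_bio under us.
The sketch below is illustrative only; struct cb_sketch, sketch_submit()
and sketch_endio() are made-up names, not kernel code.

    /* ordering_sketch.c - illustrative only, not kernel code. */
    #include <stdatomic.h>
    #include <stdio.h>

    struct cb_sketch {
    	atomic_int pending_bios;	/* bios still in flight for this cb */
    };

    /* Models the endio side: the last completion finishes the whole cb. */
    static void sketch_endio(struct cb_sketch *cb)
    {
    	if (atomic_fetch_sub(&cb->pending_bios, 1) == 1)
    		printf("last bio completed, finishing compressed_bio\n");
    }

    /* Models submit_compressed_bio(): count first, then hand the bio off. */
    static void sketch_submit(struct cb_sketch *cb)
    {
    	atomic_fetch_add(&cb->pending_bios, 1);	/* the atomic_inc step */
    	/* ...remap the endio function, map and submit the bio... */
    	sketch_endio(cb);	/* a completion may fire before we return */
    }

    int main(void)
    {
    	struct cb_sketch cb = { .pending_bios = 0 };

    	sketch_submit(&cb);	/* prints once the only bio completes */
    	return 0;
    }

In the kernel helper the same counting happens before btrfs_bio_wq_end_io()
and btrfs_map_bio(), as the first hunk below shows.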

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -419,6 +419,21 @@ static void end_compressed_bio_write(struct bio *bio)
 	bio_put(bio);
 }
 
+static blk_status_t submit_compressed_bio(struct btrfs_fs_info *fs_info,
+					  struct compressed_bio *cb,
+					  struct bio *bio, int mirror_num)
+{
+	blk_status_t ret;
+
+	ASSERT(bio->bi_iter.bi_size);
+	atomic_inc(&cb->pending_bios);
+	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
+	if (ret)
+		return ret;
+	ret = btrfs_map_bio(fs_info, bio, mirror_num);
+	return ret;
+}
+
 /*
  * worker function to build and submit bios for previously compressed pages.
  * The corresponding pages in the inode should be marked for writeback
@@ -514,19 +529,13 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 
 		page->mapping = NULL;
 		if (submit || len < PAGE_SIZE) {
-			atomic_inc(&cb->pending_bios);
-			ret = btrfs_bio_wq_end_io(fs_info, bio,
-						  BTRFS_WQ_ENDIO_DATA);
-			if (ret)
-				goto finish_cb;
-
 			if (!skip_sum) {
 				ret = btrfs_csum_one_bio(inode, bio, start, 1);
 				if (ret)
 					goto finish_cb;
 			}
 
-			ret = btrfs_map_bio(fs_info, bio, 0);
+			ret = submit_compressed_bio(fs_info, cb, bio, 0);
 			if (ret)
 				goto finish_cb;
 
@@ -553,18 +562,13 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 		cond_resched();
 	}
 
-	atomic_inc(&cb->pending_bios);
-	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
-	if (ret)
-		goto last_bio;
-
 	if (!skip_sum) {
 		ret = btrfs_csum_one_bio(inode, bio, start, 1);
 		if (ret)
 			goto last_bio;
 	}
 
-	ret = btrfs_map_bio(fs_info, bio, 0);
+	ret = submit_compressed_bio(fs_info, cb, bio, 0);
 	if (ret)
 		goto last_bio;
 
@@ -868,12 +872,6 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
 			unsigned int nr_sectors;
 
-			atomic_inc(&cb->pending_bios);
-			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
-						  BTRFS_WQ_ENDIO_DATA);
-			if (ret)
-				goto finish_cb;
-
 			ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
 			if (ret)
 				goto finish_cb;
@@ -882,7 +880,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 						  fs_info->sectorsize);
 			sums += fs_info->csum_size * nr_sectors;
 
-			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
+			ret = submit_compressed_bio(fs_info, cb, comp_bio, mirror_num);
 			if (ret)
 				goto finish_cb;
@@ -897,16 +895,11 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		cur_disk_byte += pg_len;
 	}
 
-	atomic_inc(&cb->pending_bios);
-	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
-	if (ret)
-		goto last_bio;
-
 	ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
 	if (ret)
 		goto last_bio;
 
-	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
+	ret = submit_compressed_bio(fs_info, cb, comp_bio, mirror_num);
 	if (ret)
 		goto last_bio;