btrfs: don't use btrfs_bio_wq_end_io for compressed writes

Compressed write bio completion is the only user of btrfs_bio_wq_end_io
for writes, and the use of btrfs_bio_wq_end_io is a little suboptimal
here as we only really need user context for the final completion of a
compressed_bio structure, not for every single bio completion.

Add a work_struct to struct compressed_bio instead and use that to call
finish_compressed_bio_write.  This allows us to remove all handling of
write bios in the btrfs_bio_wq_end_io infrastructure.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David Sterba <dsterba@suse.com>
Christoph Hellwig authored 2022-05-26 09:36:38 +02:00, committed by David Sterba
parent 02bb5b7247
commit fed8a72df1
5 changed files with 40 additions and 45 deletions
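
The patch applies the standard deferred-completion idiom: embed a work_struct in the per-I/O structure, initialize it with INIT_WORK() when the structure is set up, have the atomic bio end_io handler call queue_work() only when the last outstanding bio completes, and recover the containing structure with container_of() in the worker, where sleeping is allowed. Below is a minimal, hypothetical sketch of that idiom, not the btrfs code itself; the demo_* names, the "pending" refcount and the "demo-write" workqueue are illustrative stand-ins (in the patch, dec_and_test_compressed_bio() and fs_info->compressed_write_workers play those roles).

/*
 * Hypothetical sketch of the deferred-completion idiom used by the patch;
 * all demo_* names are made up and this is not the btrfs code itself.
 */
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

struct demo_cb {
	refcount_t pending;		/* bios still in flight */
	struct work_struct end_work;	/* queued once the last bio completes */
};

/* Runs in process context: may sleep, so the heavy final cleanup goes here. */
static void demo_complete_work(struct work_struct *work)
{
	struct demo_cb *cb = container_of(work, struct demo_cb, end_work);

	pr_info("final completion runs in user context\n");
	kfree(cb);
}

/*
 * Would be called from a bio ->bi_end_io handler (atomic context): only the
 * completion of the last outstanding bio defers work to the workqueue.
 */
static void demo_end_io(struct demo_cb *cb)
{
	if (refcount_dec_and_test(&cb->pending))
		queue_work(demo_wq, &cb->end_work);
}

static int __init demo_init(void)
{
	struct demo_cb *cb;

	demo_wq = alloc_workqueue("demo-write", 0, 0);
	if (!demo_wq)
		return -ENOMEM;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		destroy_workqueue(demo_wq);
		return -ENOMEM;
	}
	refcount_set(&cb->pending, 1);
	INIT_WORK(&cb->end_work, demo_complete_work);

	demo_end_io(cb);	/* pretend the one outstanding bio just completed */
	return 0;
}

static void __exit demo_exit(void)
{
	/* flushes the queue; any queued demo_complete_work has run by now */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The new compressed_write_workers queue is created with plain alloc_workqueue() rather than btrfs_alloc_workqueue(); as the hunks below show, the completion path only needs queue_work() and appears not to need the ordered btrfs_workqueue machinery.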

fs/btrfs/compression.c

@@ -398,6 +398,14 @@ static void finish_compressed_bio_write(struct compressed_bio *cb)
 	kfree(cb);
 }
 
+static void btrfs_finish_compressed_write_work(struct work_struct *work)
+{
+	struct compressed_bio *cb =
+		container_of(work, struct compressed_bio, write_end_work);
+
+	finish_compressed_bio_write(cb);
+}
+
 /*
  * Do the cleanup once all the compressed pages hit the disk. This will clear
  * writeback on the file pages and free the compressed pages.
@@ -409,29 +417,15 @@ static void end_compressed_bio_write(struct bio *bio)
 {
 	struct compressed_bio *cb = bio->bi_private;
 
-	if (!dec_and_test_compressed_bio(cb, bio))
-		goto out;
+	if (dec_and_test_compressed_bio(cb, bio)) {
+		struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
 
-	btrfs_record_physical_zoned(cb->inode, cb->start, bio);
-
-	finish_compressed_bio_write(cb);
-out:
+		btrfs_record_physical_zoned(cb->inode, cb->start, bio);
+		queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
+	}
 	bio_put(bio);
 }
 
-static blk_status_t submit_compressed_bio(struct btrfs_fs_info *fs_info,
-					  struct bio *bio, int mirror_num)
-{
-	blk_status_t ret;
-
-	ASSERT(bio->bi_iter.bi_size);
-	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
-	if (ret)
-		return ret;
-	ret = btrfs_map_bio(fs_info, bio, mirror_num);
-	return ret;
-}
-
 /*
  * Allocate a compressed_bio, which will be used to read/write on-disk
  * (aka, compressed) * data.
@@ -528,7 +522,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 	cb->compressed_pages = compressed_pages;
 	cb->compressed_len = compressed_len;
 	cb->writeback = writeback;
-	cb->orig_bio = NULL;
+	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
 	cb->nr_pages = nr_pages;
 
 	if (blkcg_css)
@@ -598,7 +592,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 					goto finish_cb;
 			}
 
-			ret = submit_compressed_bio(fs_info, bio, 0);
+			ASSERT(bio->bi_iter.bi_size);
+			ret = btrfs_map_bio(fs_info, bio, 0);
 			if (ret)
 				goto finish_cb;
 			bio = NULL;
@@ -935,7 +930,12 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 						  fs_info->sectorsize);
 			sums += fs_info->csum_size * nr_sectors;
 
-			ret = submit_compressed_bio(fs_info, comp_bio, mirror_num);
+			ASSERT(comp_bio->bi_iter.bi_size);
+			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
+						  BTRFS_WQ_ENDIO_DATA);
+			if (ret)
+				goto finish_cb;
+			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
 			if (ret)
 				goto finish_cb;
 			comp_bio = NULL;

fs/btrfs/compression.h

@@ -61,8 +61,11 @@ struct compressed_bio {
 	blk_status_t status;
 	int mirror_num;
 
-	/* for reads, this is the bio we are copying the data into */
-	struct bio *orig_bio;
+	union {
+		/* For reads, this is the bio we are copying the data into */
+		struct bio *orig_bio;
+		struct work_struct write_end_work;
+	};
 
 	/*
 	 * the start of a variable length array of checksums only

fs/btrfs/ctree.h

@@ -854,7 +854,7 @@ struct btrfs_fs_info {
 	struct btrfs_workqueue *endio_meta_workers;
 	struct workqueue_struct *endio_raid56_workers;
 	struct workqueue_struct *rmw_workers;
-	struct btrfs_workqueue *endio_meta_write_workers;
+	struct workqueue_struct *compressed_write_workers;
 	struct btrfs_workqueue *endio_write_workers;
 	struct btrfs_workqueue *endio_freespace_worker;
 	struct btrfs_workqueue *caching_workers;

fs/btrfs/disk-io.c

@@ -749,19 +749,10 @@ static void end_workqueue_bio(struct bio *bio)
 	fs_info = end_io_wq->info;
 	end_io_wq->status = bio->bi_status;
 
-	if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
-		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
-			wq = fs_info->endio_meta_write_workers;
-		else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
-			wq = fs_info->endio_freespace_worker;
-		else
-			wq = fs_info->endio_write_workers;
-	} else {
-		if (end_io_wq->metadata)
-			wq = fs_info->endio_meta_workers;
-		else
-			wq = fs_info->endio_workers;
-	}
+	if (end_io_wq->metadata)
+		wq = fs_info->endio_meta_workers;
+	else
+		wq = fs_info->endio_workers;
 
 	btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL);
 	btrfs_queue_work(wq, &end_io_wq->work);
@@ -772,6 +763,9 @@ blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 {
 	struct btrfs_end_io_wq *end_io_wq;
 
+	if (WARN_ON_ONCE(btrfs_op(bio) == BTRFS_MAP_WRITE))
+		return BLK_STS_IOERR;
+
 	end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
 	if (!end_io_wq)
 		return BLK_STS_RESOURCE;
@@ -2281,6 +2275,8 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
 		destroy_workqueue(fs_info->endio_raid56_workers);
 	if (fs_info->rmw_workers)
 		destroy_workqueue(fs_info->rmw_workers);
+	if (fs_info->compressed_write_workers)
+		destroy_workqueue(fs_info->compressed_write_workers);
 	btrfs_destroy_workqueue(fs_info->endio_write_workers);
 	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
 	btrfs_destroy_workqueue(fs_info->delayed_workers);
@@ -2295,7 +2291,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
 	 * queues can do metadata I/O operations.
 	 */
 	btrfs_destroy_workqueue(fs_info->endio_meta_workers);
-	btrfs_destroy_workqueue(fs_info->endio_meta_write_workers);
 }
 
 static void free_root_extent_buffers(struct btrfs_root *root)
@@ -2483,15 +2478,14 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
 	fs_info->endio_meta_workers =
 		btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
				      max_active, 4);
-	fs_info->endio_meta_write_workers =
-		btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
-				      max_active, 2);
 	fs_info->endio_raid56_workers =
 		alloc_workqueue("btrfs-endio-raid56", flags, max_active);
 	fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active);
 	fs_info->endio_write_workers =
 		btrfs_alloc_workqueue(fs_info, "endio-write", flags,
				      max_active, 2);
+	fs_info->compressed_write_workers =
+		alloc_workqueue("btrfs-compressed-write", flags, max_active);
 	fs_info->endio_freespace_worker =
 		btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
				      max_active, 0);
@@ -2506,7 +2500,7 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
 	if (!(fs_info->workers && fs_info->hipri_workers &&
 	      fs_info->delalloc_workers && fs_info->flush_workers &&
 	      fs_info->endio_workers && fs_info->endio_meta_workers &&
-	      fs_info->endio_meta_write_workers &&
+	      fs_info->compressed_write_workers &&
 	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
 	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
 	      fs_info->caching_workers && fs_info->fixup_workers &&

fs/btrfs/super.c

@@ -1934,8 +1934,6 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
 	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
 	btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
 	btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
-	btrfs_workqueue_set_max(fs_info->endio_meta_write_workers,
-				new_pool_size);
 	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
 	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
 	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);