btrfs: zoned: defer advancing meta write pointer

[ Upstream commit 0356ad41e0 ]

We currently advance the meta_write_pointer in
btrfs_check_meta_write_pointer(), which makes it necessary to revert it
when locking the extent buffer fails. Instead, we can advance it just
before submitting the buffer.

This is also necessary for the following commit, which needs to release
the zoned_meta_io_lock to allow IOs to come in and then waits for them to
fill the currently active block group. If we advanced the
meta_write_pointer before locking the extent buffer, a subsequent extent
buffer could pass the meta_write_pointer check before this one is
submitted, resulting in an unaligned write failure.

Advancing the pointer is still thread-safe as the extent buffer is locked.
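
To make the check/lock/advance/write ordering concrete, here is a minimal,
compilable user-space sketch of the flow after this patch. The structs and
helpers are simplified stand-ins named after the kernel functions the patch
touches; they are illustrative only, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

struct block_group {
	unsigned long long meta_write_pointer;
};

struct extent_buffer {
	unsigned long long start;
	unsigned long long len;
	bool locked;
};

/* After this patch: only check here, do not advance the pointer. */
static int check_meta_write_pointer(struct block_group *bg,
				    struct extent_buffer *eb)
{
	if (bg->meta_write_pointer == eb->start)
		return 0;	/* eb is the next sequential write */
	return -1;		/* out of order; caller must wait/retry */
}

static bool lock_extent_buffer_for_io(struct extent_buffer *eb)
{
	/* Can fail in the kernel; modeled as always succeeding here. */
	eb->locked = true;
	return true;
}

static void submit_eb(struct block_group *bg, struct extent_buffer *eb)
{
	if (check_meta_write_pointer(bg, eb))
		return;
	if (!lock_extent_buffer_for_io(eb)) {
		/* Nothing to revert: the pointer was never advanced. */
		return;
	}
	/*
	 * Advance just before issuing the write. This is safe because eb
	 * is locked, so no other writer can submit this range concurrently.
	 */
	bg->meta_write_pointer += eb->len;
	printf("write eb at %llu, wp now %llu\n",
	       eb->start, bg->meta_write_pointer);
}

int main(void)
{
	struct block_group bg = { .meta_write_pointer = 0 };
	struct extent_buffer eb1 = { .start = 0, .len = 16384 };
	struct extent_buffer eb2 = { .start = 16384, .len = 16384 };

	submit_eb(&bg, &eb1);	/* wp: 0 -> 16384 */
	submit_eb(&bg, &eb2);	/* wp: 16384 -> 32768 */
	return 0;
}

Note how the revert path disappears entirely: when locking fails there is
nothing to undo, and the pointer only ever moves forward while the extent
buffer lock is held.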

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Stable-dep-of: 13bb483d32 ("btrfs: zoned: activate metadata block group on write time")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 3 files changed, 4 insertions(+), 26 deletions(-)

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1927,15 +1927,14 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
 	}
 
 	if (!lock_extent_buffer_for_io(eb, wbc)) {
-		btrfs_revert_meta_write_pointer(ctx->zoned_bg, eb);
 		free_extent_buffer(eb);
 		return 0;
 	}
+	/* Implies write in zoned mode. */
 	if (ctx->zoned_bg) {
-		/*
-		 * Implies write in zoned mode. Mark the last eb in a block group.
-		 */
+		/* Mark the last eb in the block group. */
 		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
+		ctx->zoned_bg->meta_write_pointer += eb->len;
 	}
 	write_one_eb(eb, wbc);
 	free_extent_buffer(eb);

diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1792,11 +1792,8 @@ int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
 		ctx->zoned_bg = block_group;
 	}
 
-	if (block_group->meta_write_pointer == eb->start) {
-		block_group->meta_write_pointer = eb->start + eb->len;
-
+	if (block_group->meta_write_pointer == eb->start)
 		return 0;
-	}
 
 	/* If for_sync, this hole will be filled with trasnsaction commit. */
 	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
@@ -1804,16 +1801,6 @@ int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
 	return -EBUSY;
 }
 
-void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
-				     struct extent_buffer *eb)
-{
-	if (!btrfs_is_zoned(eb->fs_info) || !cache)
-		return;
-
-	ASSERT(cache->meta_write_pointer == eb->start + eb->len);
-	cache->meta_write_pointer = eb->start;
-}
-
 int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
 {
 	if (!btrfs_dev_is_sequential(device, physical))

diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h
--- a/fs/btrfs/zoned.h
+++ b/fs/btrfs/zoned.h
@@ -60,8 +60,6 @@ bool btrfs_use_zone_append(struct btrfs_bio *bbio);
 void btrfs_record_physical_zoned(struct btrfs_bio *bbio);
 int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
 				   struct btrfs_eb_write_context *ctx);
-void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
-				     struct extent_buffer *eb);
 int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length);
 int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
 				  u64 physical_start, u64 physical_pos);
@@ -194,12 +192,6 @@ static inline int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
-static inline void btrfs_revert_meta_write_pointer(
-						struct btrfs_block_group *cache,
-						struct extent_buffer *eb)
-{
-}
-
 static inline int btrfs_zoned_issue_zeroout(struct btrfs_device *device,
 					    u64 physical, u64 length)
 {