for-6.5-rc3-tag

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEE8rQSAMVO+zA4DBdWxWXV+ddtWDsFAmTCTAsACgkQxWXV+ddt
 WDvhYhAAluWrfM2ZzhY/tdDeKUNpf0NIAFGZIV4QP/2E43yIPC2+xMPqW/AnBnIP
 k28gOhgoH7LBP/cr0IrFHMz8Glges3cHz1UxFjJZgjiU3mAA0mgkIttPpzms7vqi
 3SVUxL2bJkebJy53nOpZcHlrcWveg+q0hTUslquCYBb3dA4gb61HBwzA2e0wKFeB
 wYw/gQtEy3TkHQPAxVjUF28ASoaroNKsE9QjfLZV0FDn0u0zBFxqpqj7bFUay++i
 sG3nPVZsqKcgIX7sUSwrpv4XAFu8fHz+GAQqCNqTxKCJ0ZZzsgzJtKs+12rv7dZC
 EvRt0jEt+DgwvmEy7j250TEbcI9rMaQuny8yt2j9sNKH/m9bW0BjptCtwghDoL89
 0D6qqicHbA+dJNq8/kDyxV6xC2Git2Ck0fpOfiU7YzhAFECZc/DkidvXa1keMUay
 usspO+YHOjDtlq0zJ0xixbxCseJfrj4habieVKZ/CnAvb84082ZiLcMxFqop/ewB
 WHKNB0O2+P78xoa7/Be6tp/w1HaaW8ZHvkPicD9d4khKJrXAKLNc/Xny4OqRT14z
 sWWaFuNjC7kIUT15EAQNj0wgymA7XcTL9gM1uuSO95PN+M3j4CleApzEvR3dn9FX
 gmoxuwVfVsJKKcwo6WFByqzu03kuSladEFasSHQAJbh3jyU9LUY=
 =Y/po
 -----END PGP SIGNATURE-----

Merge tag 'for-6.5-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:

 - fix accounting of global block reserve size when block group tree is
   enabled

 - async discard has been enabled unconditionally since 6.2, but for
   zoned mode it does not make much sense to do it asynchronously, as
   zones are reset as needed anyway

 - error handling and proper error value propagation fixes

* tag 'for-6.5-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: check for commit error at btrfs_attach_transaction_barrier()
  btrfs: check if the transaction was aborted at btrfs_wait_for_commit()
  btrfs: remove BUG_ON()'s in add_new_free_space()
  btrfs: account block group tree when calculating global reserve size
  btrfs: zoned: do not enable async discard
Linus Torvalds 2023-07-27 11:44:08 -07:00
commit 64de76ce8e
7 changed files with 75 additions and 29 deletions

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c

@@ -499,12 +499,16 @@ static void fragment_free_space(struct btrfs_block_group *block_group)
  * used yet since their free space will be released as soon as the transaction
  * commits.
  */
-u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
+int add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end,
+		       u64 *total_added_ret)
 {
 	struct btrfs_fs_info *info = block_group->fs_info;
-	u64 extent_start, extent_end, size, total_added = 0;
+	u64 extent_start, extent_end, size;
 	int ret;
 
+	if (total_added_ret)
+		*total_added_ret = 0;
+
 	while (start < end) {
 		ret = find_first_extent_bit(&info->excluded_extents, start,
 					    &extent_start, &extent_end,
@@ -517,10 +521,12 @@ u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end
 			start = extent_end + 1;
 		} else if (extent_start > start && extent_start < end) {
 			size = extent_start - start;
-			total_added += size;
 			ret = btrfs_add_free_space_async_trimmed(block_group,
 								 start, size);
-			BUG_ON(ret); /* -ENOMEM or logic error */
+			if (ret)
+				return ret;
+			if (total_added_ret)
+				*total_added_ret += size;
 			start = extent_end + 1;
 		} else {
 			break;
@@ -529,13 +535,15 @@ u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end
 
 	if (start < end) {
 		size = end - start;
-		total_added += size;
 		ret = btrfs_add_free_space_async_trimmed(block_group, start,
 							 size);
-		BUG_ON(ret); /* -ENOMEM or logic error */
+		if (ret)
+			return ret;
+		if (total_added_ret)
+			*total_added_ret += size;
 	}
 
-	return total_added;
+	return 0;
 }
 
 /*
@@ -779,8 +787,13 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 
 		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
 		    key.type == BTRFS_METADATA_ITEM_KEY) {
-			total_found += add_new_free_space(block_group, last,
-							  key.objectid);
+			u64 space_added;
+
+			ret = add_new_free_space(block_group, last, key.objectid,
+						 &space_added);
+			if (ret)
+				goto out;
+			total_found += space_added;
 			if (key.type == BTRFS_METADATA_ITEM_KEY)
 				last = key.objectid +
 					fs_info->nodesize;
@@ -795,11 +808,10 @@ static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 		}
 		path->slots[0]++;
 	}
-	ret = 0;
-
-	total_found += add_new_free_space(block_group, last,
-					  block_group->start + block_group->length);
 
+	ret = add_new_free_space(block_group, last,
+				 block_group->start + block_group->length,
+				 NULL);
 out:
 	btrfs_free_path(path);
 	return ret;
@@ -2294,9 +2306,11 @@ static int read_one_block_group(struct btrfs_fs_info *info,
 		btrfs_free_excluded_extents(cache);
 	} else if (cache->used == 0) {
 		cache->cached = BTRFS_CACHE_FINISHED;
-		add_new_free_space(cache, cache->start,
-				   cache->start + cache->length);
+		ret = add_new_free_space(cache, cache->start,
+					 cache->start + cache->length, NULL);
 		btrfs_free_excluded_extents(cache);
+		if (ret)
+			goto error;
 	}
 
 	ret = btrfs_add_block_group_cache(info, cache);
@@ -2740,9 +2754,12 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
 		return ERR_PTR(ret);
 	}
 
-	add_new_free_space(cache, chunk_offset, chunk_offset + size);
-
+	ret = add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL);
 	btrfs_free_excluded_extents(cache);
+	if (ret) {
+		btrfs_put_block_group(cache);
+		return ERR_PTR(ret);
+	}
 
 	/*
 	 * Ensure the corresponding space_info object is created and

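A quick aside on the pattern above: the rework changes add_new_free_space() from returning the number of bytes added to returning an errno-style int, with the byte count reported through an optional out-parameter. Below is a minimal user-space sketch of that calling convention; every name here is a hypothetical stand-in, not btrfs code.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for btrfs_add_free_space_async_trimmed(). */
static int add_free_space(uint64_t start, uint64_t size)
{
	(void)start;
	return size == 0 ? -EINVAL : 0;
}

/* Shape of the reworked helper: errors come back as a negative errno and
 * the amount added is reported through an optional out-parameter, so
 * callers can propagate failures instead of hitting a BUG_ON(). */
static int add_new_free_space_demo(uint64_t start, uint64_t end,
				   uint64_t *total_added_ret)
{
	int ret;

	if (total_added_ret)
		*total_added_ret = 0;

	ret = add_free_space(start, end - start);
	if (ret)
		return ret;
	if (total_added_ret)
		*total_added_ret += end - start;
	return 0;
}

int main(void)
{
	uint64_t added;

	if (add_new_free_space_demo(0, 4096, &added) == 0)
		printf("added %llu bytes\n", (unsigned long long)added);
	return 0;
}

Callers that do not care about the running total (e.g. btrfs_make_block_group() above) simply pass NULL for the out-parameter.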
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h

@@ -289,8 +289,8 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
 void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
 struct btrfs_caching_control *btrfs_get_caching_control(
 		struct btrfs_block_group *cache);
-u64 add_new_free_space(struct btrfs_block_group *block_group,
-		       u64 start, u64 end);
+int add_new_free_space(struct btrfs_block_group *block_group,
+		       u64 start, u64 end, u64 *total_added_ret);
 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
 				struct btrfs_fs_info *fs_info,
 				const u64 chunk_offset);

diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c

@@ -349,6 +349,11 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
 	}
 	read_unlock(&fs_info->global_root_lock);
 
+	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
+		num_bytes += btrfs_root_used(&fs_info->block_group_root->root_item);
+		min_items++;
+	}
+
 	/*
 	 * But we also want to reserve enough space so we can do the fallback
 	 * global reserve for an unlink, which is an additional

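For intuition: the hunk above makes the global reserve sizing account for the block group tree whenever the compat_ro feature is set, adding its root's used bytes to num_bytes and budgeting one more item. A toy calculation of that input, with every number made up (in the kernel the byte counts come from btrfs_root_used() on each root item):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	bool block_group_tree = true;		/* compat_ro feature flag */
	uint64_t num_bytes = 48ULL << 20;	/* other global roots: say 48 MiB */
	unsigned int min_items = 3;		/* items budgeted per critical op */

	if (block_group_tree) {
		num_bytes += 4ULL << 20;	/* block group root usage: say 4 MiB */
		min_items++;			/* one more tree may need an update */
	}

	printf("reserve sized from %llu MiB across %u items\n",
	       (unsigned long long)(num_bytes >> 20), min_items);
	return 0;
}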
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c

@@ -3438,11 +3438,16 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 	 * For devices supporting discard turn on discard=async automatically,
 	 * unless it's already set or disabled. This could be turned off by
 	 * nodiscard for the same mount.
+	 *
+	 * The zoned mode piggy backs on the discard functionality for
+	 * resetting a zone. There is no reason to delay the zone reset as it is
+	 * fast enough. So, do not enable async discard for zoned mode.
 	 */
 	if (!(btrfs_test_opt(fs_info, DISCARD_SYNC) ||
 	      btrfs_test_opt(fs_info, DISCARD_ASYNC) ||
 	      btrfs_test_opt(fs_info, NODISCARD)) &&
-	    fs_info->fs_devices->discardable) {
+	    fs_info->fs_devices->discardable &&
+	    !btrfs_is_zoned(fs_info)) {
 		btrfs_set_and_info(fs_info, DISCARD_ASYNC,
 				   "auto enabling async discard");
 	}

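Reduced to booleans, the auto-enable condition above reads as follows. This sketch is illustrative only; should_auto_enable_async_discard() is a made-up name, not a kernel symbol.

#include <stdbool.h>
#include <stdio.h>

static bool should_auto_enable_async_discard(bool discard_sync,
					     bool discard_async,
					     bool nodiscard,
					     bool discardable,
					     bool zoned)
{
	/* Auto-enable only when the user expressed no preference, the
	 * devices can discard, and (new in this fix) the filesystem is
	 * not zoned: zone resets already reclaim space promptly. */
	return !(discard_sync || discard_async || nodiscard) &&
	       discardable && !zoned;
}

int main(void)
{
	printf("regular fs: %d\n",
	       should_auto_enable_async_discard(false, false, false, true, false));
	printf("zoned fs:   %d\n",
	       should_auto_enable_async_discard(false, false, false, true, true));
	return 0;
}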
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c

@@ -1515,9 +1515,13 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
 		if (prev_bit == 0 && bit == 1) {
 			extent_start = offset;
 		} else if (prev_bit == 1 && bit == 0) {
-			total_found += add_new_free_space(block_group,
-							  extent_start,
-							  offset);
+			u64 space_added;
+
+			ret = add_new_free_space(block_group, extent_start,
+						 offset, &space_added);
+			if (ret)
+				goto out;
+			total_found += space_added;
 			if (total_found > CACHING_CTL_WAKE_UP) {
 				total_found = 0;
 				wake_up(&caching_ctl->wait);
@@ -1529,8 +1533,9 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
 		}
 	}
 
 	if (prev_bit == 1) {
-		total_found += add_new_free_space(block_group, extent_start,
-						  end);
+		ret = add_new_free_space(block_group, extent_start, end, NULL);
+		if (ret)
+			goto out;
 		extent_count++;
 	}
@@ -1569,6 +1574,8 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
 	end = block_group->start + block_group->length;
 
 	while (1) {
+		u64 space_added;
+
 		ret = btrfs_next_item(root, path);
 		if (ret < 0)
 			goto out;
@@ -1583,8 +1590,11 @@ static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
 		ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
 		ASSERT(key.objectid < end && key.objectid + key.offset <= end);
 
-		total_found += add_new_free_space(block_group, key.objectid,
-						  key.objectid + key.offset);
+		ret = add_new_free_space(block_group, key.objectid,
+					 key.objectid + key.offset, &space_added);
+		if (ret)
+			goto out;
+		total_found += space_added;
 		if (total_found > CACHING_CTL_WAKE_UP) {
 			total_found = 0;
 			wake_up(&caching_ctl->wait);

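The wake-up batching visible in both hunks is worth a note: found space is accumulated and waiters are woken only once more than CACHING_CTL_WAKE_UP has piled up, so allocators are not poked for every small extent. A toy model of that batching, with a made-up threshold standing in for the kernel constant:

#include <stdint.h>
#include <stdio.h>

/* Made-up threshold standing in for CACHING_CTL_WAKE_UP. */
#define WAKE_UP_BATCH (2ULL << 20)

/* Stand-in for wake_up(&caching_ctl->wait). */
static void wake_waiters(void)
{
	puts("waking waiters");
}

int main(void)
{
	const uint64_t extents[] = { 1ULL << 20, 3ULL << 20, 512ULL << 10 };
	uint64_t total_found = 0;
	unsigned int i;

	/* Same batching as the caching loops above: accumulate the space
	 * found and wake waiters only once a whole batch is available. */
	for (i = 0; i < sizeof(extents) / sizeof(extents[0]); i++) {
		total_found += extents[i];
		if (total_found > WAKE_UP_BATCH) {
			total_found = 0;
			wake_waiters();
		}
	}
	return 0;
}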
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c

@@ -826,8 +826,13 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
 
 	trans = start_transaction(root, 0, TRANS_ATTACH,
 				  BTRFS_RESERVE_NO_FLUSH, true);
-	if (trans == ERR_PTR(-ENOENT))
-		btrfs_wait_for_commit(root->fs_info, 0);
+	if (trans == ERR_PTR(-ENOENT)) {
+		int ret;
+
+		ret = btrfs_wait_for_commit(root->fs_info, 0);
+		if (ret)
+			return ERR_PTR(ret);
+	}
 
 	return trans;
 }
@@ -931,6 +936,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
 	}
 
 	wait_for_commit(cur_trans, TRANS_STATE_COMPLETED);
+	ret = cur_trans->aborted;
 	btrfs_put_transaction(cur_trans);
 out:
 	return ret;

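The one-line addition to btrfs_wait_for_commit() is about ordering: the aborted status must be read while the transaction reference is still held, because btrfs_put_transaction() may free the object. A self-contained toy of that read-before-release pattern; all demo_* names are invented:

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted transaction object. */
struct demo_trans {
	int refs;
	int aborted;	/* negative errno once the commit has failed */
};

static void demo_put_transaction(struct demo_trans *t)
{
	if (--t->refs == 0)
		free(t);	/* object is gone after the last put */
}

static int demo_wait_for_commit(struct demo_trans *t)
{
	/* Capture the aborted status while the reference is still held;
	 * touching t after the put could be a use-after-free. */
	int ret = t->aborted;

	demo_put_transaction(t);
	return ret;
}

int main(void)
{
	struct demo_trans *t = malloc(sizeof(*t));

	if (!t)
		return 1;
	t->refs = 1;
	t->aborted = -5;	/* pretend the commit aborted with -EIO */
	printf("wait_for_commit -> %d\n", demo_wait_for_commit(t));
	return 0;
}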
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c

@@ -805,6 +805,9 @@ int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info)
 		return -EINVAL;
 	}
 
+	btrfs_clear_and_info(info, DISCARD_ASYNC,
+			     "zoned: async discard ignored and disabled for zoned mode");
+
 	return 0;
 }