mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-30 14:19:16 +00:00
ext4: Ensure ext4_mb_prefetch_fini() is called for all prefetched BGs
Before this patch, the call stack in ext4_run_li_request is as follows: /* * nr = no. of BGs we want to fetch (=s_mb_prefetch) * prefetch_ios = no. of BGs not uptodate after * ext4_read_block_bitmap_nowait() */ next_group = ext4_mb_prefetch(sb, group, nr, prefetch_ios); ext4_mb_prefetch_fini(sb, next_group, prefetch_ios); ext4_mb_prefetch_fini() will only try to initialize buddies for BGs in range [next_group - prefetch_ios, next_group). This is incorrect since sometimes (prefetch_ios < nr), which causes ext4_mb_prefetch_fini() to incorrectly ignore some of the BGs that might need initialization. This issue is more notable now with the previous patch enabling "fetching" of BLOCK_UNINIT BGs which are marked buffer_uptodate by default. Fix this by passing nr to ext4_mb_prefetch_fini() instead of prefetch_ios so that it considers the right range of groups. Similarly, make sure we don't pass nr=0 to ext4_mb_prefetch_fini() in ext4_mb_regular_allocator() since we might have prefetched BLOCK_UNINIT groups that would need buddy initialization. Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com> Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com> Reviewed-by: Jan Kara <jack@suse.cz> Link: https://lore.kernel.org/r/05e648ae04ec5b754207032823e9c1de9a54f87a.1685449706.git.ojaswin@linux.ibm.com Signed-off-by: Theodore Ts'o <tytso@mit.edu>
This commit is contained in:
parent
3c6296046c
commit
4f3d1e4533
2 changed files with 4 additions and 11 deletions
|
@ -2734,8 +2734,6 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
|
|||
if ((prefetch_grp == group) &&
|
||||
(cr > CR1 ||
|
||||
prefetch_ios < sbi->s_mb_prefetch_limit)) {
|
||||
unsigned int curr_ios = prefetch_ios;
|
||||
|
||||
nr = sbi->s_mb_prefetch;
|
||||
if (ext4_has_feature_flex_bg(sb)) {
|
||||
nr = 1 << sbi->s_log_groups_per_flex;
|
||||
|
@ -2744,8 +2742,6 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
|
|||
}
|
||||
prefetch_grp = ext4_mb_prefetch(sb, group,
|
||||
nr, &prefetch_ios);
|
||||
if (prefetch_ios == curr_ios)
|
||||
nr = 0;
|
||||
}
|
||||
|
||||
/* This now checks without needing the buddy page */
|
||||
|
|
|
@ -3692,16 +3692,13 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
|
|||
ext4_group_t group = elr->lr_next_group;
|
||||
unsigned int prefetch_ios = 0;
|
||||
int ret = 0;
|
||||
int nr = EXT4_SB(sb)->s_mb_prefetch;
|
||||
u64 start_time;
|
||||
|
||||
if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
|
||||
elr->lr_next_group = ext4_mb_prefetch(sb, group,
|
||||
EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios);
|
||||
if (prefetch_ios)
|
||||
ext4_mb_prefetch_fini(sb, elr->lr_next_group,
|
||||
prefetch_ios);
|
||||
trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group,
|
||||
prefetch_ios);
|
||||
elr->lr_next_group = ext4_mb_prefetch(sb, group, nr, &prefetch_ios);
|
||||
ext4_mb_prefetch_fini(sb, elr->lr_next_group, nr);
|
||||
trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group, nr);
|
||||
if (group >= elr->lr_next_group) {
|
||||
ret = 1;
|
||||
if (elr->lr_first_not_zeroed != ngroups &&
|
||||
|
|
Loading…
Reference in a new issue