gfs2: convert stuffed_readpage() to stuffed_read_folio()

Use folio_fill_tail() to implement the unstuffing and folio_end_read() to
simultaneously mark the folio uptodate and unlock it.  Unifies a couple of
code paths.

Link: https://lkml.kernel.org/r/20231107212643.3490372-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Darrick J. Wong <djwong@kernel.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Matthew Wilcox (Oracle) 2023-11-07 21:26:42 +00:00 committed by Andrew Morton
parent 6eaa266b54
commit 78c3c11268
1 changed file with 17 additions and 20 deletions

View File

@@ -403,18 +403,18 @@ static int gfs2_jdata_writepages(struct address_space *mapping,
}
/**
* stuffed_readpage - Fill in a Linux folio with stuffed file data
* stuffed_read_folio - Fill in a Linux folio with stuffed file data
* @ip: the inode
* @folio: the folio
*
* Returns: errno
*/
static int stuffed_readpage(struct gfs2_inode *ip, struct folio *folio)
static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio)
{
struct buffer_head *dibh;
size_t i_size = i_size_read(&ip->i_inode);
void *data;
int error;
struct buffer_head *dibh = NULL;
size_t dsize = i_size_read(&ip->i_inode);
void *from = NULL;
int error = 0;
/*
* Due to the order of unstuffing files and ->fault(), we can be
@@ -422,22 +422,20 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct folio *folio)
* so we need to supply one here. It doesn't happen often.
*/
if (unlikely(folio->index)) {
folio_zero_range(folio, 0, folio_size(folio));
folio_mark_uptodate(folio);
return 0;
dsize = 0;
} else {
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
goto out;
from = dibh->b_data + sizeof(struct gfs2_dinode);
}
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
return error;
data = dibh->b_data + sizeof(struct gfs2_dinode);
memcpy_to_folio(folio, 0, data, i_size);
folio_zero_range(folio, i_size, folio_size(folio) - i_size);
folio_fill_tail(folio, 0, from, dsize);
brelse(dibh);
folio_mark_uptodate(folio);
out:
folio_end_read(folio, error == 0);
return 0;
return error;
}
/**
@@ -456,8 +454,7 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
(i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
error = iomap_read_folio(folio, &gfs2_iomap_ops);
} else if (gfs2_is_stuffed(ip)) {
error = stuffed_readpage(ip, folio);
folio_unlock(folio);
error = stuffed_read_folio(ip, folio);
} else {
error = mpage_read_folio(folio, gfs2_block_map);
}