fs: convert block_write_full_page to block_write_full_folio

Convert the function to be compatible with writepage_t so that it can be
passed to write_cache_pages() by blkdev.  This removes a call to
compound_head().  We can also remove the function export as both callers
are built-in.

Link: https://lkml.kernel.org/r/20231215200245.748418-14-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author:    Matthew Wilcox (Oracle) <willy@infradead.org>
Date:      2023-12-15 20:02:44 +00:00
Committer: Andrew Morton <akpm@linux-foundation.org>
commit 17bf23a981 (parent af34acc24b)
9 changed files with 35 additions and 22 deletions
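
For reference, a minimal sketch (not from this commit) of the shape that makes the conversion work: write_cache_pages() takes a writepage_t callback plus an opaque data pointer, and block_write_full_folio() now matches that typedef exactly, with the get_block callback smuggled through the void * argument. my_writepages() and my_get_block() below are hypothetical names.

	/*
	 * writepage_t is declared in include/linux/writeback.h as:
	 *
	 *   typedef int (*writepage_t)(struct folio *folio,
	 *			struct writeback_control *wbc, void *data);
	 *
	 * block_write_full_folio(folio, wbc, get_block) has exactly this
	 * shape, so a caller can hand it straight to write_cache_pages()
	 * and pass its get_block_t callback through the data pointer.
	 */
	static int my_writepages(struct address_space *mapping,
			struct writeback_control *wbc)
	{
		return write_cache_pages(mapping, wbc, block_write_full_folio,
				my_get_block);
	}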

diff --git a/block/fops.c b/block/fops.c

@@ -410,9 +410,24 @@ static int blkdev_get_block(struct inode *inode, sector_t iblock,
 	return 0;
 }
 
-static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
+/*
+ * We cannot call mpage_writepages() as it does not take the buffer lock.
+ * We must use block_write_full_folio() directly which holds the buffer
+ * lock.  The buffer lock provides the synchronisation with writeback
+ * that filesystems rely on when they use the blockdev's mapping.
+ */
+static int blkdev_writepages(struct address_space *mapping,
+		struct writeback_control *wbc)
 {
-	return block_write_full_page(page, blkdev_get_block, wbc);
+	struct blk_plug plug;
+	int err;
+
+	blk_start_plug(&plug);
+	err = write_cache_pages(mapping, wbc, block_write_full_folio,
+			blkdev_get_block);
+	blk_finish_plug(&plug);
+	return err;
 }
 
 static int blkdev_read_folio(struct file *file, struct folio *folio)
@@ -449,7 +464,7 @@ const struct address_space_operations def_blk_aops = {
 	.invalidate_folio	= block_invalidate_folio,
 	.read_folio		= blkdev_read_folio,
 	.readahead		= blkdev_readahead,
-	.writepage		= blkdev_writepage,
+	.writepages		= blkdev_writepages,
 	.write_begin		= blkdev_write_begin,
 	.write_end		= blkdev_write_end,
 	.migrate_folio		= buffer_migrate_folio_norefs,
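
On the plugging above: blk_start_plug() makes bios submitted by the current task accumulate on a per-task list so that blk_finish_plug() can merge and dispatch them in one batch, which is why blkdev_writepages() brackets the whole write_cache_pages() walk with a plug. A sketch of the pattern, assuming a hypothetical submit_one_folio() helper standing in for the per-folio writeback:

	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* bios now queue on this task */
	for (i = 0; i < nr; i++)
		submit_one_folio(folios[i], wbc);	/* hypothetical helper */
	blk_finish_plug(&plug);		/* merge and issue the queued bios */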

diff --git a/fs/buffer.c b/fs/buffer.c

@@ -372,7 +372,7 @@ static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
 }
 
 /*
- * Completion handler for block_write_full_page() - pages which are unlocked
+ * Completion handler for block_write_full_folio() - pages which are unlocked
  * during I/O, and which have PageWriteback cleared upon I/O completion.
  */
 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
@@ -1771,18 +1771,18 @@ static struct buffer_head *folio_create_buffers(struct folio *folio,
  */
 
 /*
- * While block_write_full_page is writing back the dirty buffers under
+ * While block_write_full_folio is writing back the dirty buffers under
  * the page lock, whoever dirtied the buffers may decide to clean them
  * again at any time.  We handle that by only looking at the buffer
  * state inside lock_buffer().
  *
- * If block_write_full_page() is called for regular writeback
+ * If block_write_full_folio() is called for regular writeback
  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
  * locked buffer.  This only can happen if someone has written the buffer
  * directly, with submit_bh().  At the address_space level PageWriteback
  * prevents this contention from occurring.
  *
- * If block_write_full_page() is called with wbc->sync_mode ==
+ * If block_write_full_folio() is called with wbc->sync_mode ==
  * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
  * causes the writes to be flagged as synchronous writes.
  */
@@ -1829,7 +1829,7 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio,
 			 * truncate in progress.
 			 */
 			/*
-			 * The buffer was zeroed by block_write_full_page()
+			 * The buffer was zeroed by block_write_full_folio()
 			 */
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
@@ -2696,10 +2696,9 @@ EXPORT_SYMBOL(block_truncate_page);
 /*
  * The generic ->writepage function for buffer-backed address_spaces
  */
-int block_write_full_page(struct page *page, get_block_t *get_block,
-			struct writeback_control *wbc)
+int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
+		void *get_block)
 {
-	struct folio *folio = page_folio(page);
 	struct inode * const inode = folio->mapping->host;
 	loff_t i_size = i_size_read(inode);
 
@@ -2726,7 +2725,6 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 	return __block_write_full_folio(inode, folio, get_block, wbc,
 			end_buffer_async_write);
 }
-EXPORT_SYMBOL(block_write_full_page);
 
 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
 			    get_block_t *get_block)

diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c

@@ -444,7 +444,7 @@ int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
 	folio_clear_error(folio);
 
 	/*
-	 * Comments copied from block_write_full_page:
+	 * Comments copied from block_write_full_folio:
 	 *
 	 * The folio straddles i_size.  It must be zeroed out on each and every
 	 * writepage invocation because it may be mmapped.  "A file is mapped

diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c

@@ -82,11 +82,11 @@ static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
 }
 
 /**
- * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_page
+ * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
  * @folio: The folio to write
  * @wbc: The writeback control
  *
- * This is the same as calling block_write_full_page, but it also
+ * This is the same as calling block_write_full_folio, but it also
  * writes pages outside of i_size
  */
 static int gfs2_write_jdata_folio(struct folio *folio,

diff --git a/fs/mpage.c b/fs/mpage.c

@@ -642,7 +642,7 @@ confused:
 	/*
 	 * The caller has a ref on the inode, so *mapping is stable
 	 */
-	ret = block_write_full_page(&folio->page, mpd->get_block, wbc);
+	ret = block_write_full_folio(folio, wbc, mpd->get_block);
 	mapping_set_error(mapping, ret);
 out:
 	mpd->bio = bio;

diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c

@@ -1304,7 +1304,7 @@ done:
 * page cleaned.  The VM has already locked the page and marked it clean.
 *
 * For non-resident attributes, ntfs_writepage() writes the @page by calling
- * the ntfs version of the generic block_write_full_page() function,
+ * the ntfs version of the generic block_write_full_folio() function,
 * ntfs_write_block(), which in turn if necessary creates and writes the
 * buffers associated with the page asynchronously.
 *
@@ -1314,7 +1314,7 @@ done:
 * vfs inode dirty code path for the inode the mft record belongs to or via the
 * vm page dirty code path for the page the mft record is in.
 *
- * Based on ntfs_read_folio() and fs/buffer.c::block_write_full_page().
+ * Based on ntfs_read_folio() and fs/buffer.c::block_write_full_folio().
 *
 * Return 0 on success and -errno on error.
 */

diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c

@@ -6934,7 +6934,7 @@ static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
 * nonzero data on subsequent file extends.
 *
 * We need to call this before i_size is updated on the inode because
- * otherwise block_write_full_page() will skip writeout of pages past
+ * otherwise block_write_full_folio() will skip writeout of pages past
 * i_size.
 */
 int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,

diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c

@@ -818,7 +818,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 	/*
 	 * fs-writeback will release the dirty pages without page lock
 	 * whose offset are over inode size, the release happens at
-	 * block_write_full_page().
+	 * block_write_full_folio().
 	 */
 	i_size_write(inode, abs_to);
 	inode->i_blocks = ocfs2_inode_sector_count(inode);

diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h

@@ -252,8 +252,8 @@ void __bh_read_batch(int nr, struct buffer_head *bhs[],
 * address_spaces.
 */
 void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
-int block_write_full_page(struct page *page, get_block_t *get_block,
-			struct writeback_control *wbc);
+int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
+		void *get_block);
 int __block_write_full_folio(struct inode *inode, struct folio *folio,
 			get_block_t *get_block, struct writeback_control *wbc,
 			bh_end_io_t *handler);