nilfs2: convert nilfs_segctor_complete_write to use folios

Use the new folio APIs, saving five calls to compound_head().  This
includes the last callers of nilfs_end_page_io(), so remove that too.

Link: https://lkml.kernel.org/r/20231114084436.2755-4-konishi.ryusuke@gmail.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Matthew Wilcox (Oracle) 2023-11-14 17:44:19 +09:00 committed by Andrew Morton
parent 50196f0081
commit 3cd36212bf
1 changed file with 21 additions and 28 deletions

View File

@ -1778,13 +1778,6 @@ static void nilfs_end_folio_io(struct folio *folio, int err)
folio_end_writeback(folio);
}
/*
 * nilfs_end_page_io - legacy page-based wrapper around nilfs_end_folio_io()
 * @page: page whose enclosing folio has finished segment I/O (may be NULL)
 * @err:  I/O status forwarded to nilfs_end_folio_io() (0 on success)
 *
 * Tolerates a NULL @page so callers can pass an unset tracking pointer
 * without a guard of their own; otherwise delegates to the folio variant.
 */
static void nilfs_end_page_io(struct page *page, int err)
{
	if (page)
		nilfs_end_folio_io(page_folio(page), err);
}
static void nilfs_abort_logs(struct list_head *logs, int err)
{
struct nilfs_segment_buffer *segbuf;
@ -1867,7 +1860,7 @@ static void nilfs_set_next_segment(struct the_nilfs *nilfs,
static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
struct nilfs_segment_buffer *segbuf;
struct page *bd_page = NULL, *fs_page = NULL;
struct folio *bd_folio = NULL, *fs_folio = NULL;
struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
int update_sr = false;
@ -1878,21 +1871,21 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
b_assoc_buffers) {
set_buffer_uptodate(bh);
clear_buffer_dirty(bh);
if (bh->b_page != bd_page) {
if (bd_page)
end_page_writeback(bd_page);
bd_page = bh->b_page;
if (bh->b_folio != bd_folio) {
if (bd_folio)
folio_end_writeback(bd_folio);
bd_folio = bh->b_folio;
}
}
/*
* We assume that the buffers which belong to the same page
* We assume that the buffers which belong to the same folio
* continue over the buffer list.
* Under this assumption, the last BHs of pages is
* identifiable by the discontinuity of bh->b_page
* (page != fs_page).
* Under this assumption, the last BHs of folios is
* identifiable by the discontinuity of bh->b_folio
* (folio != fs_folio).
*
* For B-tree node blocks, however, this assumption is not
* guaranteed. The cleanup code of B-tree node pages needs
* guaranteed. The cleanup code of B-tree node folios needs
* special care.
*/
list_for_each_entry(bh, &segbuf->sb_payload_buffers,
@ -1905,16 +1898,16 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
set_mask_bits(&bh->b_state, clear_bits, set_bits);
if (bh == segbuf->sb_super_root) {
if (bh->b_page != bd_page) {
end_page_writeback(bd_page);
bd_page = bh->b_page;
if (bh->b_folio != bd_folio) {
folio_end_writeback(bd_folio);
bd_folio = bh->b_folio;
}
update_sr = true;
break;
}
if (bh->b_page != fs_page) {
nilfs_end_page_io(fs_page, 0);
fs_page = bh->b_page;
if (bh->b_folio != fs_folio) {
nilfs_end_folio_io(fs_folio, 0);
fs_folio = bh->b_folio;
}
}
@ -1928,13 +1921,13 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
}
}
/*
* Since pages may continue over multiple segment buffers,
* end of the last page must be checked outside of the loop.
* Since folios may continue over multiple segment buffers,
* end of the last folio must be checked outside of the loop.
*/
if (bd_page)
end_page_writeback(bd_page);
if (bd_folio)
folio_end_writeback(bd_folio);
nilfs_end_page_io(fs_page, 0);
nilfs_end_folio_io(fs_folio, 0);
nilfs_drop_collected_inodes(&sci->sc_dirty_files);