f2fs: convert f2fs_sync_meta_pages() to use filemap_get_folios_tag()

Convert the function to use folios throughout.  This is in preparation for
the removal of find_get_pages_range_tag().  This change removes 5 calls to
compound_head().

Initially the function checked whether the previous page index was truly
the previous page, i.e. exactly 1 index behind the current page.  To
convert to folios while maintaining this check, the condition becomes
folio->index != prev + folio_nr_pages(previous folio), since we don't know
how many pages a given folio contains.

At index i == 0 the check is guaranteed to succeed, so to work around the
indexing bounds we can simply skip the check for that specific index.
This makes the initial assignment of prev unnecessary, so I removed that
as well.
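
For illustration, a before/after sketch of that contiguity check
(simplified from the hunk below; the folio_batch_release() on the
bail-out path is omitted):

	/* page-based: the previous unit is always exactly one index behind */
	if (nr_to_write != LONG_MAX && page->index != prev + 1)
		goto stop;

	/* folio-based: a folio may cover several pages, so the expected next
	 * index is the previous folio's index plus its page count; at i == 0
	 * the check is simply skipped
	 */
	if (nr_to_write != LONG_MAX && i != 0 &&
	    folio->index != prev + folio_nr_pages(fbatch.folios[i - 1]))
		goto stop;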

Also modify a comment in commit_checkpoint for consistency.

Link: https://lkml.kernel.org/r/20230104211448.4804-17-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Acked-by: Chao Yu <chao@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

@@ -395,59 +395,62 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
 {
 	struct address_space *mapping = META_MAPPING(sbi);
 	pgoff_t index = 0, prev = ULONG_MAX;
-	struct pagevec pvec;
+	struct folio_batch fbatch;
 	long nwritten = 0;
-	int nr_pages;
+	int nr_folios;
 	struct writeback_control wbc = {
 		.for_reclaim = 0,
 	};
 	struct blk_plug plug;
 
-	pagevec_init(&pvec);
+	folio_batch_init(&fbatch);
 
 	blk_start_plug(&plug);
 
-	while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-				PAGECACHE_TAG_DIRTY))) {
+	while ((nr_folios = filemap_get_folios_tag(mapping, &index,
+					(pgoff_t)-1,
+					PAGECACHE_TAG_DIRTY, &fbatch))) {
 		int i;
 
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
+		for (i = 0; i < nr_folios; i++) {
+			struct folio *folio = fbatch.folios[i];
 
-			if (prev == ULONG_MAX)
-				prev = page->index - 1;
-			if (nr_to_write != LONG_MAX && page->index != prev + 1) {
-				pagevec_release(&pvec);
+			if (nr_to_write != LONG_MAX && i != 0 &&
+					folio->index != prev +
+					folio_nr_pages(fbatch.folios[i-1])) {
+				folio_batch_release(&fbatch);
 				goto stop;
 			}
 
-			lock_page(page);
+			folio_lock(folio);
 
-			if (unlikely(page->mapping != mapping)) {
+			if (unlikely(folio->mapping != mapping)) {
 continue_unlock:
-				unlock_page(page);
+				folio_unlock(folio);
 				continue;
 			}
-			if (!PageDirty(page)) {
+			if (!folio_test_dirty(folio)) {
 				/* someone wrote it for us */
 				goto continue_unlock;
 			}
 
-			f2fs_wait_on_page_writeback(page, META, true, true);
+			f2fs_wait_on_page_writeback(&folio->page, META,
+					true, true);
 
-			if (!clear_page_dirty_for_io(page))
+			if (!folio_clear_dirty_for_io(folio))
 				goto continue_unlock;
 
-			if (__f2fs_write_meta_page(page, &wbc, io_type)) {
-				unlock_page(page);
+			if (__f2fs_write_meta_page(&folio->page, &wbc,
+					io_type)) {
+				folio_unlock(folio);
 				break;
 			}
-			nwritten++;
-			prev = page->index;
+			nwritten += folio_nr_pages(folio);
+			prev = folio->index;
 			if (unlikely(nwritten >= nr_to_write))
 				break;
 		}
-		pagevec_release(&pvec);
+		folio_batch_release(&fbatch);
 		cond_resched();
 	}
 stop:
@@ -1403,7 +1406,7 @@ static void commit_checkpoint(struct f2fs_sb_info *sbi,
 	};
 
 	/*
-	 * pagevec_lookup_tag and lock_page again will take
+	 * filemap_get_folios_tag and lock_page again will take
 	 * some extra time. Therefore, f2fs_update_meta_pages and
 	 * f2fs_sync_meta_pages are combined in this function.
 	 */
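
For reference, the general caller pattern that filemap_get_folios_tag()
expects, distilled from the hunk above (a minimal sketch, not part of the
applied diff; "mapping" stands for whatever address_space the caller is
writing back):

	struct folio_batch fbatch;
	pgoff_t index = 0;
	unsigned int i, nr;

	folio_batch_init(&fbatch);
	while ((nr = filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
					PAGECACHE_TAG_DIRTY, &fbatch))) {
		for (i = 0; i < nr; i++) {
			struct folio *folio = fbatch.folios[i];

			/* lock the folio, recheck ->mapping and dirtiness,
			 * then write it back
			 */
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}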