vmscan: convert dirty page handling to folios

Mostly this just eliminates calls to compound_head(), but
NR_VMSCAN_IMMEDIATE was being incremented by 1 instead of by nr_pages.

Link: https://lkml.kernel.org/r/20220504182857.4013401-10-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Authored by Matthew Wilcox (Oracle) on 2022-05-12 20:23:02 -07:00; committed by Andrew Morton.
parent 09c02e5632
commit 49bd2bf967

View file

@ -1787,28 +1787,31 @@ static unsigned int shrink_page_list(struct list_head *page_list,
} }
} }
if (PageDirty(page)) { if (folio_test_dirty(folio)) {
/* /*
* Only kswapd can writeback filesystem pages * Only kswapd can writeback filesystem folios
* to avoid risk of stack overflow. But avoid * to avoid risk of stack overflow. But avoid
* injecting inefficient single-page IO into * injecting inefficient single-folio I/O into
* flusher writeback as much as possible: only * flusher writeback as much as possible: only
* write pages when we've encountered many * write folios when we've encountered many
* dirty pages, and when we've already scanned * dirty folios, and when we've already scanned
* the rest of the LRU for clean pages and see * the rest of the LRU for clean folios and see
* the same dirty pages again (PageReclaim). * the same dirty folios again (with the reclaim
* flag set).
*/ */
if (page_is_file_lru(page) && if (folio_is_file_lru(folio) &&
(!current_is_kswapd() || !PageReclaim(page) || (!current_is_kswapd() ||
!folio_test_reclaim(folio) ||
!test_bit(PGDAT_DIRTY, &pgdat->flags))) { !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
/* /*
* Immediately reclaim when written back. * Immediately reclaim when written back.
* Similar in principal to deactivate_page() * Similar in principle to deactivate_page()
* except we already have the page isolated * except we already have the folio isolated
* and know it's dirty * and know it's dirty
*/ */
inc_node_page_state(page, NR_VMSCAN_IMMEDIATE); node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
SetPageReclaim(page); nr_pages);
folio_set_reclaim(folio);
goto activate_locked; goto activate_locked;
} }
@ -1821,8 +1824,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
goto keep_locked; goto keep_locked;
/* /*
* Page is dirty. Flush the TLB if a writable entry * Folio is dirty. Flush the TLB if a writable entry
* potentially exists to avoid CPU writes after IO * potentially exists to avoid CPU writes after I/O
* starts and then write it out here. * starts and then write it out here.
*/ */
try_to_unmap_flush_dirty(); try_to_unmap_flush_dirty();
@ -1834,23 +1837,24 @@ static unsigned int shrink_page_list(struct list_head *page_list,
case PAGE_SUCCESS: case PAGE_SUCCESS:
stat->nr_pageout += nr_pages; stat->nr_pageout += nr_pages;
if (PageWriteback(page)) if (folio_test_writeback(folio))
goto keep; goto keep;
if (PageDirty(page)) if (folio_test_dirty(folio))
goto keep; goto keep;
/* /*
* A synchronous write - probably a ramdisk. Go * A synchronous write - probably a ramdisk. Go
* ahead and try to reclaim the page. * ahead and try to reclaim the folio.
*/ */
if (!trylock_page(page)) if (!folio_trylock(folio))
goto keep; goto keep;
if (PageDirty(page) || PageWriteback(page)) if (folio_test_dirty(folio) ||
folio_test_writeback(folio))
goto keep_locked; goto keep_locked;
mapping = page_mapping(page); mapping = folio_mapping(folio);
fallthrough; fallthrough;
case PAGE_CLEAN: case PAGE_CLEAN:
; /* try to free the page below */ ; /* try to free the folio below */
} }
} }