mm: remove folio_account_redirty

Fold folio_account_redirty() into folio_redirty_for_writepage(), now that
all other users are gone except for the account_page_redirty() wrapper,
which is itself unused.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Author: Christoph Hellwig, 2023-06-28 17:31:44 +02:00; committed by David Sterba
parent 256b0cf90d
commit ed2da9246f
2 changed files with 14 additions and 40 deletions

include/linux/writeback.h

@@ -375,11 +375,6 @@ void tag_pages_for_writeback(struct address_space *mapping,
 			     pgoff_t start, pgoff_t end);
 
 bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
-void folio_account_redirty(struct folio *folio);
-static inline void account_page_redirty(struct page *page)
-{
-	folio_account_redirty(page_folio(page));
-}
 bool folio_redirty_for_writepage(struct writeback_control *, struct folio *);
 bool redirty_page_for_writepage(struct writeback_control *, struct page *);

mm/page-writeback.c

@@ -1193,7 +1193,7 @@ static void wb_update_write_bandwidth(struct bdi_writeback *wb,
 	 * write_bandwidth = ---------------------------------------------------
 	 *                                          period
 	 *
-	 * @written may have decreased due to folio_account_redirty().
+	 * @written may have decreased due to folio_redirty_for_writepage().
 	 * Avoid underflowing @bw calculation.
 	 */
 	bw = written - min(written, wb->written_stamp);
@@ -2711,37 +2711,6 @@ bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
 }
 EXPORT_SYMBOL(filemap_dirty_folio);
 
-/**
- * folio_account_redirty - Manually account for redirtying a page.
- * @folio: The folio which is being redirtied.
- *
- * Most filesystems should call folio_redirty_for_writepage() instead
- * of this function.  If your filesystem is doing writeback outside the
- * context of a writeback_control(), it can call this when redirtying
- * a folio, to de-account the dirty counters (NR_DIRTIED, WB_DIRTIED,
- * tsk->nr_dirtied), so that they match the written counters (NR_WRITTEN,
- * WB_WRITTEN) in long term. The mismatches will lead to systematic errors
- * in balanced_dirty_ratelimit and the dirty pages position control.
- */
-void folio_account_redirty(struct folio *folio)
-{
-	struct address_space *mapping = folio->mapping;
-
-	if (mapping && mapping_can_writeback(mapping)) {
-		struct inode *inode = mapping->host;
-		struct bdi_writeback *wb;
-		struct wb_lock_cookie cookie = {};
-		long nr = folio_nr_pages(folio);
-
-		wb = unlocked_inode_to_wb_begin(inode, &cookie);
-		current->nr_dirtied -= nr;
-		node_stat_mod_folio(folio, NR_DIRTIED, -nr);
-		wb_stat_mod(wb, WB_DIRTIED, -nr);
-		unlocked_inode_to_wb_end(inode, &cookie);
-	}
-}
-EXPORT_SYMBOL(folio_account_redirty);
-
 /**
  * folio_redirty_for_writepage - Decline to write a dirty folio.
  * @wbc: The writeback control.
@@ -2757,13 +2726,23 @@ EXPORT_SYMBOL(folio_account_redirty);
 bool folio_redirty_for_writepage(struct writeback_control *wbc,
 		struct folio *folio)
 {
-	bool ret;
+	struct address_space *mapping = folio->mapping;
 	long nr = folio_nr_pages(folio);
+	bool ret;
 
 	wbc->pages_skipped += nr;
-	ret = filemap_dirty_folio(folio->mapping, folio);
-	folio_account_redirty(folio);
-
+	ret = filemap_dirty_folio(mapping, folio);
+	if (mapping && mapping_can_writeback(mapping)) {
+		struct inode *inode = mapping->host;
+		struct bdi_writeback *wb;
+		struct wb_lock_cookie cookie = {};
+
+		wb = unlocked_inode_to_wb_begin(inode, &cookie);
+		current->nr_dirtied -= nr;
+		node_stat_mod_folio(folio, NR_DIRTIED, -nr);
+		wb_stat_mod(wb, WB_DIRTIED, -nr);
+		unlocked_inode_to_wb_end(inode, &cookie);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(folio_redirty_for_writepage);
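
For context on how the folded accounting gets exercised (an illustration, not part of
this patch): the typical caller is a filesystem writeback path that decides it cannot
write a dirty folio after all and wants to keep it dirty for a later pass. A minimal
sketch, where example_skip_folio() is a purely hypothetical helper following the usual
folio_redirty_for_writepage() calling pattern:

	/* Hypothetical example caller, not from this patch. */
	static void example_skip_folio(struct writeback_control *wbc,
			struct folio *folio)
	{
		/*
		 * Keep the folio dirty and bump wbc->pages_skipped; since this
		 * change, folio_redirty_for_writepage() also rolls back the
		 * NR_DIRTIED / WB_DIRTIED / current->nr_dirtied accounting
		 * that folio_account_redirty() used to handle.
		 */
		folio_redirty_for_writepage(wbc, folio);
		folio_unlock(folio);
	}

Because every remaining caller redirties folios from under a writeback_control, doing
the de-accounting inside folio_redirty_for_writepage() keeps the dirtied counters in
step with the written counters (as the removed comment explains) without a separate
exported helper.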