mm: free folios in a batch in shrink_folio_list()

Use free_unref_page_batch() to free the folios.  This may increase the
number of IPIs from calling try_to_unmap_flush() more often, but that's
going to be very workload-dependent.  It may even reduce the number of
IPIs as we now batch-free large folios instead of freeing them one at a
time.

Link: https://lkml.kernel.org/r/20240227174254.710559-12-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Matthew Wilcox (Oracle) 2024-02-27 17:42:45 +00:00 committed by Andrew Morton
parent f77171d241
commit bc2ff4cbc3
1 changed file with 9 additions and 11 deletions

View File

@@ -1006,14 +1006,15 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
 		struct pglist_data *pgdat, struct scan_control *sc,
 		struct reclaim_stat *stat, bool ignore_references)
 {
+	struct folio_batch free_folios;
 	LIST_HEAD(ret_folios);
-	LIST_HEAD(free_folios);
 	LIST_HEAD(demote_folios);
 	unsigned int nr_reclaimed = 0;
 	unsigned int pgactivate = 0;
 	bool do_demote_pass;
 	struct swap_iocb *plug = NULL;
 
+	folio_batch_init(&free_folios);
 	memset(stat, 0, sizeof(*stat));
 	cond_resched();
 	do_demote_pass = can_demote(pgdat->node_id, sc);
@@ -1412,14 +1413,11 @@ free_it:
 		 */
 		nr_reclaimed += nr_pages;
 
-		/*
-		 * Is there need to periodically free_folio_list? It would
-		 * appear not as the counts should be low
-		 */
-		if (unlikely(folio_test_large(folio)))
-			destroy_large_folio(folio);
-		else
-			list_add(&folio->lru, &free_folios);
+		if (folio_batch_add(&free_folios, folio) == 0) {
+			mem_cgroup_uncharge_folios(&free_folios);
+			try_to_unmap_flush();
+			free_unref_folios(&free_folios);
+		}
 		continue;
 
 activate_locked_split:
@@ -1483,9 +1481,9 @@ keep:
 
 	pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
 
-	mem_cgroup_uncharge_list(&free_folios);
+	mem_cgroup_uncharge_folios(&free_folios);
 	try_to_unmap_flush();
-	free_unref_page_list(&free_folios);
+	free_unref_folios(&free_folios);
 
 	list_splice(&ret_folios, folio_list);
 	count_vm_events(PGACTIVATE, pgactivate);