mm: rename invalidate_mapping_pagevec to mapping_try_invalidate

We don't use pagevecs for the LRU cache any more, and we don't know that
the failed invalidations were due to the folio being in an LRU cache.  So
rename it to be more accurate.

Link: https://lkml.kernel.org/r/20230621164557.3510324-12-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Matthew Wilcox (Oracle) 2023-06-21 17:45:55 +01:00 committed by Andrew Morton
parent 1e0877d58b
commit 1a0fc811f5
3 changed files with 21 additions and 24 deletions

View file

@@ -143,7 +143,7 @@ int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
 	}
 	if (end_index >= start_index) {
-		unsigned long nr_pagevec = 0;
+		unsigned long nr_failed = 0;

 		/*
 		 * It's common to FADV_DONTNEED right after
@@ -156,17 +156,15 @@ int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
 		 */
 		lru_add_drain();
-		invalidate_mapping_pagevec(mapping,
-				start_index, end_index,
-				&nr_pagevec);
+		mapping_try_invalidate(mapping, start_index, end_index,
+				&nr_failed);

 		/*
-		 * If fewer pages were invalidated than expected then
-		 * it is possible that some of the pages were on
-		 * a per-cpu pagevec for a remote CPU. Drain all
-		 * pagevecs and try again.
+		 * The failures may be due to the folio being
+		 * in the LRU cache of a remote CPU. Drain all
+		 * caches and try again.
 		 */
-		if (nr_pagevec) {
+		if (nr_failed) {
 			lru_add_drain_all();
 			invalidate_mapping_pages(mapping, start_index,
 						end_index);

View file

@@ -133,8 +133,8 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
 bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
 		loff_t end);
 long invalidate_inode_page(struct page *page);
-unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
-		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec);
+unsigned long mapping_try_invalidate(struct address_space *mapping,
+		pgoff_t start, pgoff_t end, unsigned long *nr_failed);

 /**
  * folio_evictable - Test whether a folio is evictable.

View file

@@ -486,18 +486,17 @@ void truncate_inode_pages_final(struct address_space *mapping)
 EXPORT_SYMBOL(truncate_inode_pages_final);

 /**
- * invalidate_mapping_pagevec - Invalidate all the unlocked pages of one inode
- * @mapping: the address_space which holds the pages to invalidate
+ * mapping_try_invalidate - Invalidate all the evictable folios of one inode
+ * @mapping: the address_space which holds the folios to invalidate
  * @start: the offset 'from' which to invalidate
  * @end: the offset 'to' which to invalidate (inclusive)
- * @nr_pagevec: invalidate failed page number for caller
+ * @nr_failed: How many folio invalidations failed
  *
- * This helper is similar to invalidate_mapping_pages(), except that it accounts
- * for pages that are likely on a pagevec and counts them in @nr_pagevec, which
- * will be used by the caller.
+ * This function is similar to invalidate_mapping_pages(), except that it
+ * returns the number of folios which could not be evicted in @nr_failed.
  */
-unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
-		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
+unsigned long mapping_try_invalidate(struct address_space *mapping,
+		pgoff_t start, pgoff_t end, unsigned long *nr_failed)
 {
 	pgoff_t indices[PAGEVEC_SIZE];
 	struct folio_batch fbatch;
@@ -527,9 +526,9 @@ unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
 		 */
 		if (!ret) {
 			deactivate_file_folio(folio);
-			/* It is likely on the pagevec of a remote CPU */
-			if (nr_pagevec)
-				(*nr_pagevec)++;
+			/* Likely in the lru cache of a remote CPU */
+			if (nr_failed)
+				(*nr_failed)++;
 		}
 		count += ret;
 	}
@@ -552,12 +551,12 @@ unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
  * If you want to remove all the pages of one inode, regardless of
  * their use and writeback state, use truncate_inode_pages().
  *
- * Return: the number of the cache entries that were invalidated
+ * Return: The number of indices that had their contents invalidated
  */
 unsigned long invalidate_mapping_pages(struct address_space *mapping,
 		pgoff_t start, pgoff_t end)
 {
-	return invalidate_mapping_pagevec(mapping, start, end, NULL);
+	return mapping_try_invalidate(mapping, start, end, NULL);
 }
 EXPORT_SYMBOL(invalidate_mapping_pages);