filemap: Add filemap_get_folios()

This is the equivalent of find_get_pages() but fills a folio_batch
instead of an array of pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Christian Brauner (Microsoft) <brauner@kernel.org>
Author: Matthew Wilcox (Oracle)
Date: 2022-06-03 15:30:25 -04:00
Parent: 2bb876b58d
Commit: be0ced5e9c
2 changed files with 61 additions and 0 deletions
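
For orientation only (not part of the patch): a minimal caller-side sketch of how a loop built on find_get_pages_range() maps onto the new helper. The touch_range() and touch_range_folios() names are made up for illustration; the calls used (find_get_pages_range(), put_page(), mark_page_accessed(), folio_batch_init(), filemap_get_folios(), folio_batch_count(), folio_mark_accessed(), folio_batch_release()) are existing kernel APIs at this point in the tree.

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

/* Before: gather raw pages into a fixed array, then drop each reference. */
static void touch_range(struct address_space *mapping, pgoff_t start, pgoff_t end)
{
	struct page *pages[PAGEVEC_SIZE];
	unsigned i, nr;

	while ((nr = find_get_pages_range(mapping, &start, end,
					  PAGEVEC_SIZE, pages)) != 0) {
		for (i = 0; i < nr; i++) {
			mark_page_accessed(pages[i]);	/* per-page work */
			put_page(pages[i]);		/* drop our reference */
		}
	}
}

/* After: the same walk; the folio_batch carries and releases the references. */
static void touch_range_folios(struct address_space *mapping, pgoff_t start,
			       pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			folio_mark_accessed(fbatch.folios[i]);	/* per-folio work */
		folio_batch_release(&fbatch);	/* drops every elevated refcount */
	}
}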

include/linux/pagemap.h

@@ -718,6 +718,8 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
	return head + (index & (thp_nr_pages(head) - 1));
}

unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);

mm/filemap.c

@@ -2127,6 +2127,65 @@ put:
	return folio_batch_count(fbatch);
}

/**
 * filemap_get_folios - Get a batch of folios
 * @mapping: The address_space to search
 * @start: The starting page index
 * @end: The final page index (inclusive)
 * @fbatch: The batch to fill.
 *
 * Search for and return a batch of folios in the mapping starting at
 * index @start and up to index @end (inclusive). The folios are returned
 * in @fbatch with an elevated reference count.
 *
 * The first folio may start before @start; if it does, it will contain
 * @start. The final folio may extend beyond @end; if it does, it will
 * contain @end. The folios have ascending indices. There may be gaps
 * between the folios if there are indices which have no folio in the
 * page cache. If folios are added to or removed from the page cache
 * while this is running, they may or may not be found by this call.
 *
 * Return: The number of folios which were found.
 * We also update @start to index the next folio for the traversal.
 */
unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, *start);
	struct folio *folio;

	rcu_read_lock();
	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
		/* Skip over shadow, swap and DAX entries */
		if (xa_is_value(folio))
			continue;
		if (!folio_batch_add(fbatch, folio)) {
			unsigned long nr = folio_nr_pages(folio);

			if (folio_test_hugetlb(folio))
				nr = 1;
			*start = folio->index + nr;
			goto out;
		}
	}

	/*
	 * We come here when there is no page beyond @end. We take care to not
	 * overflow the index @start as it confuses some of the callers. This
	 * breaks the iteration when there is a page at index -1 but that is
	 * already broken anyway.
	 */
	if (end == (pgoff_t)-1)
		*start = (pgoff_t)-1;
	else
		*start = end + 1;
out:
	rcu_read_unlock();
	return folio_batch_count(fbatch);
}
EXPORT_SYMBOL(filemap_get_folios);

static inline
bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
{
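
To make the boundary semantics in the kernel-doc concrete, here is a hedged caller-side sketch (visit_range() is a hypothetical name; the other calls are existing kernel APIs). The first and last folios in a batch are only guaranteed to overlap the requested range, so a caller that cares about exact page indices clamps to [start, end] before operating on each folio, and relies on the updated index to resume the traversal.

#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/printk.h>

/*
 * Hypothetical caller: visit only the part of each returned folio that
 * overlaps [start, end].  The first folio in a batch may begin before
 * @start and the last may extend past @end, so clamp before doing work.
 */
static void visit_range(struct address_space *mapping, pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	pgoff_t index = start;
	unsigned i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &index, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];
			pgoff_t first = max(start, folio->index);
			pgoff_t last = min(end, folio->index +
					   folio_nr_pages(folio) - 1);

			pr_debug("folio at %lu: working on indices %lu-%lu\n",
				 folio->index, first, last);
		}
		folio_batch_release(&fbatch);	/* drop the elevated refcounts */
	}
}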