readahead: Convert page_cache_async_ra() to take a folio

Using the folio here avoids checking whether it's a tail page.
This patch mostly just enables some of the following patches.
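For callers the conversion is mechanical: wherever a struct page was passed as the
readahead hint, pass the containing folio instead, typically via page_folio(), which
never returns a tail page, so the callee no longer needs a tail-page check. A minimal,
hypothetical caller sketch of the before/after -- example_trigger_ra() and its argument
list are illustrative only and not part of this patch:

	#include <linux/pagemap.h>

	/* Illustrative only: a caller that noticed PG_readahead on a page. */
	static void example_trigger_ra(struct readahead_control *ractl,
			struct page *page, unsigned long req_count)
	{
		/* Old interface: page_cache_async_ra(ractl, page, req_count); */

		/*
		 * New interface: page_folio() resolves a tail page to its
		 * head folio, so page_cache_async_ra() is never handed a
		 * tail page.
		 */
		page_cache_async_ra(ractl, page_folio(page), req_count);
	}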

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Author: Matthew Wilcox (Oracle)
Date:   2021-05-27 12:30:54 -04:00
Parent: 2fa4eeb800
Commit: 7836d99900
2 changed files with 5 additions and 5 deletions

--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -993,7 +993,7 @@ struct readahead_control {
 void page_cache_ra_unbounded(struct readahead_control *,
 		unsigned long nr_to_read, unsigned long lookahead_count);
 void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
-void page_cache_async_ra(struct readahead_control *, struct page *,
+void page_cache_async_ra(struct readahead_control *, struct folio *,
 		unsigned long req_count);
 void readahead_expand(struct readahead_control *ractl,
 		loff_t new_start, size_t new_len);
@@ -1040,7 +1040,7 @@ void page_cache_async_readahead(struct address_space *mapping,
 		struct page *page, pgoff_t index, unsigned long req_count)
 {
 	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
-	page_cache_async_ra(&ractl, page, req_count);
+	page_cache_async_ra(&ractl, page_folio(page), req_count);
 }
 
 static inline struct folio *__readahead_folio(struct readahead_control *ractl)

--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -581,7 +581,7 @@ void page_cache_sync_ra(struct readahead_control *ractl,
 EXPORT_SYMBOL_GPL(page_cache_sync_ra);
 
 void page_cache_async_ra(struct readahead_control *ractl,
-		struct page *page, unsigned long req_count)
+		struct folio *folio, unsigned long req_count)
 {
 	/* no read-ahead */
 	if (!ractl->ra->ra_pages)
@@ -590,10 +590,10 @@ void page_cache_async_ra(struct readahead_control *ractl,
 	/*
 	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
-	if (PageWriteback(page))
+	if (folio_test_writeback(folio))
 		return;
 
-	ClearPageReadahead(page);
+	folio_clear_readahead(folio);
 
 	/*
 	 * Defer asynchronous read-ahead on IO congestion.