mm: convert pagecache_isize_extended to use a folio

Remove four hidden calls to compound_head().  Also exit early if the
filesystem block size is >= PAGE_SIZE instead of just equal to PAGE_SIZE.

Link: https://lkml.kernel.org/r/20240405180038.2618624-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Pankaj Raghav <p.raghav@samsung.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Matthew Wilcox (Oracle) 2024-04-05 19:00:36 +01:00 committed by Andrew Morton
parent 55d134a7b4
commit 2ebe90dab9
1 changed file with 17 additions and 19 deletions

View File

@@ -764,15 +764,15 @@ EXPORT_SYMBOL(truncate_setsize);
  * @from: original inode size
  * @to: new inode size
  *
- * Handle extension of inode size either caused by extending truncate or by
- * write starting after current i_size. We mark the page straddling current
- * i_size RO so that page_mkwrite() is called on the nearest write access to
- * the page. This way filesystem can be sure that page_mkwrite() is called on
- * the page before user writes to the page via mmap after the i_size has been
- * changed.
+ * Handle extension of inode size either caused by extending truncate or
+ * by write starting after current i_size. We mark the page straddling
+ * current i_size RO so that page_mkwrite() is called on the first
+ * write access to the page. The filesystem will update its per-block
+ * information before user writes to the page via mmap after the i_size
+ * has been changed.
  *
  * The function must be called after i_size is updated so that page fault
- * coming after we unlock the page will already see the new i_size.
+ * coming after we unlock the folio will already see the new i_size.
  * The function must be called while we still hold i_rwsem - this not only
  * makes sure i_size is stable but also that userspace cannot observe new
  * i_size value before we are prepared to store mmap writes at new inode size.
@@ -781,31 +781,29 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
 {
 	int bsize = i_blocksize(inode);
 	loff_t rounded_from;
-	struct page *page;
-	pgoff_t index;
+	struct folio *folio;
 
 	WARN_ON(to > inode->i_size);
 
-	if (from >= to || bsize == PAGE_SIZE)
+	if (from >= to || bsize >= PAGE_SIZE)
 		return;
 	/* Page straddling @from will not have any hole block created? */
 	rounded_from = round_up(from, bsize);
 	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
 		return;
 
-	index = from >> PAGE_SHIFT;
-	page = find_lock_page(inode->i_mapping, index);
-	/* Page not cached? Nothing to do */
-	if (!page)
+	folio = filemap_lock_folio(inode->i_mapping, from / PAGE_SIZE);
+	/* Folio not cached? Nothing to do */
+	if (IS_ERR(folio))
 		return;
 	/*
-	 * See clear_page_dirty_for_io() for details why set_page_dirty()
+	 * See folio_clear_dirty_for_io() for details why folio_mark_dirty()
 	 * is needed.
 	 */
-	if (page_mkclean(page))
-		set_page_dirty(page);
-	unlock_page(page);
-	put_page(page);
+	if (folio_mkclean(folio))
+		folio_mark_dirty(folio);
+	folio_unlock(folio);
+	folio_put(folio);
 }
 EXPORT_SYMBOL(pagecache_isize_extended);