fs: Turn block_invalidatepage into block_invalidate_folio

Remove special-casing of a NULL invalidatepage, since there is no
more block_invalidatepage.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
Author: Matthew Wilcox (Oracle)
Date:   2022-02-09 20:21:34 +00:00
Commit: 7ba13abbd3 (parent: d82354f6b0)
27 changed files with 65 additions and 45 deletions
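Before the per-filesystem hunks, here is a minimal sketch of what the conversion means for a buffer_head-backed filesystem's address_space_operations. The "examplefs" names are hypothetical; the .invalidate_folio member and block_invalidate_folio() are the ones this patch wires up:

/*
 * Before: ->invalidatepage could be left NULL and the truncation code
 * fell back to block_invalidatepage() under CONFIG_BLOCK (see the
 * folio_invalidate() hunk at the end of this diff).
 */
static const struct address_space_operations examplefs_aops_old = {
        .set_page_dirty = __set_page_dirty_buffers,
        .readpage       = examplefs_readpage,
        .writepage      = examplefs_writepage,
};

/*
 * After: the NULL fallback is gone, so the folio-based hook has to be
 * named explicitly, exactly as each hunk below does.
 */
static const struct address_space_operations examplefs_aops = {
        .set_page_dirty   = __set_page_dirty_buffers,
        .invalidate_folio = block_invalidate_folio,
        .readpage         = examplefs_readpage,
        .writepage        = examplefs_writepage,
};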

@@ -430,6 +430,7 @@ static int blkdev_writepages(struct address_space *mapping,
 const struct address_space_operations def_blk_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = blkdev_readpage,
         .readahead = blkdev_readahead,
         .writepage = blkdev_writepage,

@@ -74,6 +74,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
 static const struct address_space_operations adfs_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = adfs_readpage,
         .writepage = adfs_writepage,
         .write_begin = adfs_write_begin,

@@ -454,6 +454,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations affs_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = affs_readpage,
         .writepage = affs_writepage,
         .write_begin = affs_write_begin,
@@ -835,6 +836,7 @@ err_bh:
 const struct address_space_operations affs_aops_ofs = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = affs_readpage_ofs,
         //.writepage = affs_writepage_ofs,
         .write_begin = affs_write_begin_ofs,

@@ -189,6 +189,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations bfs_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = bfs_readpage,
         .writepage = bfs_writepage,
         .write_begin = bfs_write_begin,

@@ -1482,41 +1482,40 @@ static void discard_buffer(struct buffer_head * bh)
 }
 /**
- * block_invalidatepage - invalidate part or all of a buffer-backed page
- *
- * @page: the page which is affected
+ * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
+ * @folio: The folio which is affected.
  * @offset: start of the range to invalidate
  * @length: length of the range to invalidate
  *
- * block_invalidatepage() is called when all or part of the page has become
+ * block_invalidate_folio() is called when all or part of the folio has been
  * invalidated by a truncate operation.
  *
- * block_invalidatepage() does not have to release all buffers, but it must
+ * block_invalidate_folio() does not have to release all buffers, but it must
  * ensure that no dirty buffer is left outside @offset and that no I/O
  * is underway against any of the blocks which are outside the truncation
  * point. Because the caller is about to free (and possibly reuse) those
  * blocks on-disk.
  */
-void block_invalidatepage(struct page *page, unsigned int offset,
-                          unsigned int length)
+void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 {
         struct buffer_head *head, *bh, *next;
-        unsigned int curr_off = 0;
-        unsigned int stop = length + offset;
+        size_t curr_off = 0;
+        size_t stop = length + offset;
-        BUG_ON(!PageLocked(page));
-        if (!page_has_buffers(page))
-                goto out;
+        BUG_ON(!folio_test_locked(folio));
         /*
          * Check for overflow
          */
-        BUG_ON(stop > PAGE_SIZE || stop < length);
+        BUG_ON(stop > folio_size(folio) || stop < length);
+        head = folio_buffers(folio);
+        if (!head)
+                return;
-        head = page_buffers(page);
         bh = head;
         do {
-                unsigned int next_off = curr_off + bh->b_size;
+                size_t next_off = curr_off + bh->b_size;
                 next = bh->b_this_page;
                 /*
@@ -1535,16 +1534,16 @@ void block_invalidatepage(struct page *page, unsigned int offset,
         } while (bh != head);
         /*
-         * We release buffers only if the entire page is being invalidated.
+         * We release buffers only if the entire folio is being invalidated.
          * The get_block cached value has been unconditionally invalidated,
          * so real IO is not possible anymore.
          */
-        if (length == PAGE_SIZE)
-                try_to_release_page(page, 0);
+        if (length == folio_size(folio))
+                filemap_release_folio(folio, 0);
 out:
         return;
 }
-EXPORT_SYMBOL(block_invalidatepage);
+EXPORT_SYMBOL(block_invalidate_folio);
 /*

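The kernel-doc above is the entire contract: a filesystem can either point ->invalidate_folio straight at this helper, as most of the hunks below do, or wrap it when it wants to do extra work first, as the ext4 conversion further down does. A minimal sketch of such a wrapper (the "examplefs" name is hypothetical; the signature matches the one exported here):

static void examplefs_invalidate_folio(struct folio *folio, size_t offset,
                                       size_t length)
{
        /* any filesystem-private bookkeeping would go here */
        block_invalidate_folio(folio, offset, length);
}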

@@ -546,6 +546,7 @@ const struct address_space_operations ecryptfs_aops = {
          */
 #ifdef CONFIG_BLOCK
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
 #endif
         .writepage = ecryptfs_writepage,
         .readpage = ecryptfs_readpage,

@@ -491,6 +491,7 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from)
 static const struct address_space_operations exfat_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = exfat_readpage,
         .readahead = exfat_readahead,
         .writepage = exfat_writepage,

@@ -968,6 +968,7 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc
 const struct address_space_operations ext2_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = ext2_readpage,
         .readahead = ext2_readahead,
         .writepage = ext2_writepage,
@@ -983,6 +984,7 @@ const struct address_space_operations ext2_aops = {
 const struct address_space_operations ext2_nobh_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = ext2_readpage,
         .readahead = ext2_readahead,
         .writepage = ext2_nobh_writepage,

@@ -137,8 +137,6 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode,
                                                    new_size);
 }
-static void ext4_invalidatepage(struct page *page, unsigned int offset,
-                                unsigned int length);
 static int __ext4_journalled_writepage(struct page *page, unsigned int len);
 static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
                                   int pextents);
@@ -1571,16 +1569,18 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
                         break;
                 for (i = 0; i < nr_pages; i++) {
                         struct page *page = pvec.pages[i];
+                        struct folio *folio = page_folio(page);
-                        BUG_ON(!PageLocked(page));
-                        BUG_ON(PageWriteback(page));
+                        BUG_ON(!folio_test_locked(folio));
+                        BUG_ON(folio_test_writeback(folio));
                         if (invalidate) {
-                                if (page_mapped(page))
-                                        clear_page_dirty_for_io(page);
-                                block_invalidatepage(page, 0, PAGE_SIZE);
-                                ClearPageUptodate(page);
+                                if (folio_mapped(folio))
+                                        folio_clear_dirty_for_io(folio);
+                                block_invalidate_folio(folio, 0,
+                                                folio_size(folio));
+                                folio_clear_uptodate(folio);
                         }
-                        unlock_page(page);
+                        folio_unlock(folio);
                 }
                 pagevec_release(&pvec);
         }
@@ -3183,15 +3183,15 @@ static void ext4_readahead(struct readahead_control *rac)
         ext4_mpage_readpages(inode, rac, NULL);
 }
-static void ext4_invalidatepage(struct page *page, unsigned int offset,
-                                unsigned int length)
+static void ext4_invalidate_folio(struct folio *folio, size_t offset,
+                                  size_t length)
 {
-        trace_ext4_invalidatepage(page, offset, length);
+        trace_ext4_invalidatepage(&folio->page, offset, length);
         /* No journalling happens on data buffers when this function is used */
-        WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
+        WARN_ON(folio_buffers(folio) && buffer_jbd(folio_buffers(folio)));
-        block_invalidatepage(page, offset, length);
+        block_invalidate_folio(folio, offset, length);
 }
 static int __ext4_journalled_invalidatepage(struct page *page,
@@ -3583,7 +3583,7 @@ static const struct address_space_operations ext4_aops = {
         .write_end = ext4_write_end,
         .set_page_dirty = ext4_set_page_dirty,
         .bmap = ext4_bmap,
-        .invalidatepage = ext4_invalidatepage,
+        .invalidate_folio = ext4_invalidate_folio,
         .releasepage = ext4_releasepage,
         .direct_IO = noop_direct_IO,
         .migratepage = buffer_migrate_page,
@@ -3618,7 +3618,7 @@ static const struct address_space_operations ext4_da_aops = {
         .write_end = ext4_da_write_end,
         .set_page_dirty = ext4_set_page_dirty,
         .bmap = ext4_bmap,
-        .invalidatepage = ext4_invalidatepage,
+        .invalidate_folio = ext4_invalidate_folio,
         .releasepage = ext4_releasepage,
         .direct_IO = noop_direct_IO,
         .migratepage = buffer_migrate_page,

@@ -343,6 +343,7 @@ int fat_block_truncate_page(struct inode *inode, loff_t from)
 static const struct address_space_operations fat_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = fat_readpage,
         .readahead = fat_readahead,
         .writepage = fat_writepage,

@@ -90,12 +90,14 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 const struct address_space_operations gfs2_meta_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .writepage = gfs2_aspace_writepage,
         .releasepage = gfs2_releasepage,
 };
 const struct address_space_operations gfs2_rgrp_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .writepage = gfs2_aspace_writepage,
         .releasepage = gfs2_releasepage,
 };

@@ -160,6 +160,7 @@ static int hfs_writepages(struct address_space *mapping,
 const struct address_space_operations hfs_btree_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = hfs_readpage,
         .writepage = hfs_writepage,
         .write_begin = hfs_write_begin,
@@ -170,6 +171,7 @@ const struct address_space_operations hfs_btree_aops = {
 const struct address_space_operations hfs_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = hfs_readpage,
         .writepage = hfs_writepage,
         .write_begin = hfs_write_begin,

@@ -157,6 +157,7 @@ static int hfsplus_writepages(struct address_space *mapping,
 const struct address_space_operations hfsplus_btree_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = hfsplus_readpage,
         .writepage = hfsplus_writepage,
         .write_begin = hfsplus_write_begin,
@@ -167,6 +168,7 @@ const struct address_space_operations hfsplus_btree_aops = {
 const struct address_space_operations hfsplus_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = hfsplus_readpage,
         .writepage = hfsplus_writepage,
         .write_begin = hfsplus_write_begin,

@@ -246,6 +246,7 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 const struct address_space_operations hpfs_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = hpfs_readpage,
         .writepage = hpfs_writepage,
         .readahead = hpfs_readahead,

@@ -358,6 +358,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 const struct address_space_operations jfs_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = jfs_readpage,
         .readahead = jfs_readahead,
         .writepage = jfs_writepage,

@@ -443,6 +443,7 @@ static sector_t minix_bmap(struct address_space *mapping, sector_t block)
 static const struct address_space_operations minix_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = minix_readpage,
         .writepage = minix_writepage,
         .write_begin = minix_write_begin,

@@ -304,7 +304,7 @@ const struct address_space_operations nilfs_aops = {
         .write_begin = nilfs_write_begin,
         .write_end = nilfs_write_end,
         /* .releasepage = nilfs_releasepage, */
-        .invalidatepage = block_invalidatepage,
+        .invalidate_folio = block_invalidate_folio,
         .direct_IO = nilfs_direct_IO,
         .is_partially_uptodate = block_is_partially_uptodate,
 };

@@ -435,6 +435,7 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
 static const struct address_space_operations def_mdt_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .writepage = nilfs_mdt_write_page,
 };

@@ -1350,12 +1350,13 @@ retry_writepage:
         /* Is the page fully outside i_size? (truncate in progress) */
         if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >>
                         PAGE_SHIFT)) {
+                struct folio *folio = page_folio(page);
                 /*
                  * The page may have dirty, unmapped buffers. Make them
                  * freeable here, so the page does not leak.
                  */
-                block_invalidatepage(page, 0, PAGE_SIZE);
-                unlock_page(page);
+                block_invalidate_folio(folio, 0, folio_size(folio));
+                folio_unlock(folio);
                 ntfs_debug("Write outside i_size - truncated?");
                 return 0;
         }

@@ -2461,7 +2461,7 @@ const struct address_space_operations ocfs2_aops = {
         .write_end = ocfs2_write_end,
         .bmap = ocfs2_bmap,
         .direct_IO = ocfs2_direct_IO,
-        .invalidatepage = block_invalidatepage,
+        .invalidate_folio = block_invalidate_folio,
         .releasepage = ocfs2_releasepage,
         .migratepage = buffer_migrate_page,
         .is_partially_uptodate = block_is_partially_uptodate,

@@ -373,6 +373,7 @@ const struct inode_operations omfs_file_inops = {
 const struct address_space_operations omfs_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = omfs_readpage,
         .readahead = omfs_readahead,
         .writepage = omfs_writepage,

@@ -496,6 +496,7 @@ static sector_t sysv_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations sysv_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = sysv_readpage,
         .writepage = sysv_writepage,
         .write_begin = sysv_write_begin,

@@ -126,6 +126,7 @@ static int udf_adinicb_write_end(struct file *file, struct address_space *mappin
 const struct address_space_operations udf_adinicb_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = udf_adinicb_readpage,
         .writepage = udf_adinicb_writepage,
         .write_begin = udf_adinicb_write_begin,

@@ -236,6 +236,7 @@ static sector_t udf_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations udf_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = udf_readpage,
         .readahead = udf_readahead,
         .writepage = udf_writepage,

@@ -527,6 +527,7 @@ static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
 const struct address_space_operations ufs_aops = {
         .set_page_dirty = __set_page_dirty_buffers,
+        .invalidate_folio = block_invalidate_folio,
         .readpage = ufs_readpage,
         .writepage = ufs_writepage,
         .write_begin = ufs_write_begin,

@@ -217,8 +217,7 @@ extern int buffer_heads_over_limit;
  * Generic address_space_operations implementations for buffer_head-backed
  * address_spaces.
  */
-void block_invalidatepage(struct page *page, unsigned int offset,
-                unsigned int length);
+void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
 int block_write_full_page(struct page *page, get_block_t *get_block,
                                 struct writeback_control *wbc);
 int __block_write_full_page(struct inode *inode, struct page *page,

@@ -163,10 +163,6 @@ void folio_invalidate(struct folio *folio, size_t offset, size_t length)
         }
         invalidatepage = aops->invalidatepage;
-#ifdef CONFIG_BLOCK
-        if (!invalidatepage)
-                invalidatepage = block_invalidatepage;
-#endif
         if (invalidatepage)
                 (*invalidatepage)(&folio->page, offset, length);
 }