xfs: factor mapping out of xfs_do_writepage

Separate out the bufferhead based mapping from the writepage code so
that we have a clear separation of the page operations and the
bufferhead state.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <david@fromorbit.com>
Dave Chinner authored on 2016-02-15 17:21:37 +11:00, committed by Dave Chinner
parent ad68972acb
commit bfce7d2e2d
1 changed file with 122 additions and 110 deletions
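For orientation before the diff: the change moves the per-bufferhead mapping loop out of xfs_do_writepage() into a new helper, xfs_writepage_map(), leaving the page-level work in the caller. The toy program below is only a sketch of that split, not XFS code; every name in it (wp_ctx, toy_page, toy_buffer, writepage_map, do_writepage) is invented for illustration, and the real logic is in the fs/xfs/xfs_aops.c hunks that follow.

/*
 * Toy model of the refactor -- not kernel code. It only shows the shape:
 * a writeback context carried by the caller, a page-level entry point,
 * and a factored-out helper that walks the page's buffers.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_buffer {
	bool	uptodate;
	bool	mapped;
};

struct toy_page {
	struct toy_buffer	buffers[4];
	int			nr_buffers;
};

/* writeback context carried across pages, analogous to xfs_writepage_ctx */
struct wp_ctx {
	bool	imap_valid;
	int	count;		/* buffers queued for IO so far */
};

/* the factored-out helper: walks buffer state only, no page-level checks */
static int
writepage_map(struct wp_ctx *wpc, struct toy_page *page)
{
	int i;

	for (i = 0; i < page->nr_buffers; i++) {
		struct toy_buffer *bh = &page->buffers[i];

		/* skip holes (uptodate but never mapped), drop cached mapping */
		if (!bh->mapped && bh->uptodate) {
			wpc->imap_valid = false;
			continue;
		}
		wpc->count++;
	}
	return 0;
}

/* the page-level entry point: checks the page, then delegates the walk */
static int
do_writepage(struct toy_page *page, void *data)
{
	struct wp_ctx *wpc = data;

	if (page->nr_buffers == 0)
		return 0;	/* nothing to write on this page */
	return writepage_map(wpc, page);
}

int main(void)
{
	struct wp_ctx wpc = { .imap_valid = false, .count = 0 };
	struct toy_page page = {
		.buffers = {
			{ .uptodate = true,  .mapped = true  },
			{ .uptodate = true,  .mapped = false },	/* a hole */
			{ .uptodate = false, .mapped = true  },
			{ .uptodate = true,  .mapped = true  },
		},
		.nr_buffers = 4,
	};

	do_writepage(&page, &wpc);
	printf("buffers queued for IO: %d\n", wpc.count);	/* prints 3 */
	return 0;
}

The real xfs_writepage_map() additionally tracks the IO type, validates and refreshes the cached imap, builds ioends, and handles errors, exactly as the first hunk below shows.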


@@ -747,6 +747,127 @@ xfs_writepage_submit(
	return status;
}

static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct inode *inode,
	struct page *page,
	loff_t offset,
	__uint64_t end_offset)
{
	struct buffer_head *bh, *head;
	ssize_t len = 1 << inode->i_blkbits;
	int error = 0;
	int uptodate = 1;
	int count = 0;

	bh = head = page_buffers(page);
	offset = page_offset(page);

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state. The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (wpc->io_type != XFS_IO_UNWRITTEN) {
				wpc->io_type = XFS_IO_UNWRITTEN;
				wpc->imap_valid = false;
			}
		} else if (buffer_delay(bh)) {
			if (wpc->io_type != XFS_IO_DELALLOC) {
				wpc->io_type = XFS_IO_DELALLOC;
				wpc->imap_valid = false;
			}
		} else if (buffer_uptodate(bh)) {
			if (wpc->io_type != XFS_IO_OVERWRITE) {
				wpc->io_type = XFS_IO_OVERWRITE;
				wpc->imap_valid = false;
			}
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk. Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					       wpc->io_type);
			if (error)
				goto out_error;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc);
			count++;
		}

		if (!wpc->iohead)
			wpc->iohead = wpc->ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);
	ASSERT(wpc->iohead || !count);
	return 0;

out_error:
	/*
	 * On error, we have to fail the iohead here because we locked buffers
	 * in the ioend chain. If we don't do this, we'll deadlock invalidating
	 * the page as that tries to lock the buffers on the page. Also, because
	 * we may have set pages under writeback, we have to make sure we run
	 * IO completion to mark the error state of the IO appropriately, so we
	 * can't cancel the ioend directly here. That means we have to mark this
	 * page as under writeback if we included any buffers from it in the
	 * ioend chain so that completion treats it correctly.
	 *
	 * If we didn't include the page in the ioend, then we can simply
	 * discard and unlock it as there are no other users of the page or it's
	 * buffers right now. The caller will still need to trigger submission
	 * of outstanding ioends on the writepage context so they are treated
	 * correctly on error.
	 */
	if (count)
		xfs_start_page_writeback(page, 0, count);
	else {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	}
	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
@@ -763,13 +884,9 @@ xfs_do_writepage(
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode *inode = page->mapping->host;
	struct buffer_head *bh, *head;
	loff_t offset;
	__uint64_t end_offset;
	pgoff_t end_index;
	ssize_t len;
	int err, uptodate = 1;
	int count = 0;

	trace_xfs_writepage(inode, page, 0, 0);
@@ -862,112 +979,7 @@ xfs_do_writepage(
		end_offset = offset;
	}

	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);

	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state. The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (wpc->io_type != XFS_IO_UNWRITTEN) {
				wpc->io_type = XFS_IO_UNWRITTEN;
				wpc->imap_valid = false;
			}
		} else if (buffer_delay(bh)) {
			if (wpc->io_type != XFS_IO_DELALLOC) {
				wpc->io_type = XFS_IO_DELALLOC;
				wpc->imap_valid = false;
			}
		} else if (buffer_uptodate(bh)) {
			if (wpc->io_type != XFS_IO_OVERWRITE) {
				wpc->io_type = XFS_IO_OVERWRITE;
				wpc->imap_valid = false;
			}
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk. Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = 0;
			continue;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		if (!wpc->imap_valid) {
			err = xfs_map_blocks(inode, offset, &wpc->imap,
					     wpc->io_type);
			if (err)
				goto error;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc);
			count++;
		}

		if (!wpc->iohead)
			wpc->iohead = wpc->ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);
	ASSERT(wpc->iohead || !count);
	return 0;

error:
	/*
	 * On error, we have to fail the iohead here because we buffers locked
	 * in the ioend chain. If we don't do this, we'll deadlock invalidating
	 * the page as that tries to lock the buffers on the page. Also, because
	 * we may have set pages under writeback, we have to make sure we run
	 * IO completion to mark the error state of the IO appropriately, so we
	 * can't cancel the ioend directly here. That means we have to mark this
	 * page as under writeback if we included any buffers from it in the
	 * ioend chain so that completion treats it correctly.
	 *
	 * If we didn't include the page in the ioend, then we can simply
	 * discard and unlock it as there are no other users of the page or it's
	 * buffers right now. The caller will still need to trigger submission
	 * of outstanding ioends on the writepage context so they are treated
	 * correctly on error.
	 */
	if (count)
		xfs_start_page_writeback(page, 0, count);
	else {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	}
	mapping_set_error(page->mapping, err);
	return err;
	return xfs_writepage_map(wpc, inode, page, offset, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);