Merge branch 'xfs-io-fixes' into for-next

Dave Chinner 2015-10-12 18:38:11 +11:00
commit 8a56d7c305
5 changed files with 59 additions and 20 deletions

fs/xfs/xfs_aops.c

@@ -172,6 +172,12 @@ xfs_setfilesize_ioend(
current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
/* we abort the update if there was an IO error */
if (ioend->io_error) {
xfs_trans_cancel(tp);
return ioend->io_error;
}
return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}
@@ -212,14 +218,17 @@ xfs_end_io(
ioend->io_error = -EIO;
goto done;
}
if (ioend->io_error)
goto done;
/*
* For unwritten extents we need to issue transactions to convert a
* range to normal written extents after the data I/O has finished.
* Detecting and handling completion IO errors is done individually
* for each case as different cleanup operations need to be performed
* on error.
*/
if (ioend->io_type == XFS_IO_UNWRITTEN) {
if (ioend->io_error)
goto done;
error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
ioend->io_size);
} else if (ioend->io_append_trans) {
@@ -1399,12 +1408,12 @@ __xfs_get_blocks(
imap.br_startblock == DELAYSTARTBLOCK))) {
if (direct || xfs_get_extsz_hint(ip)) {
/*
* Drop the ilock in preparation for starting the block
* allocation transaction. It will be retaken
* exclusively inside xfs_iomap_write_direct for the
* actual allocation.
* xfs_iomap_write_direct() expects the shared lock. It
* is unlocked on return.
*/
xfs_iunlock(ip, lockmode);
if (lockmode == XFS_ILOCK_EXCL)
xfs_ilock_demote(ip, lockmode);
error = xfs_iomap_write_direct(ip, offset, size,
&imap, nimaps);
if (error)
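
The __xfs_get_blocks() hunk above changes the lock handoff for direct I/O block allocation: instead of dropping the ilock and letting xfs_iomap_write_direct() retake it exclusively, the caller now demotes an exclusive ilock to shared and hands it over, and the callee unlocks before returning. A condensed, kernel-style sketch of the caller-side contract (simplified from the hunk, not a standalone program; the error handling shown is an assumption since the hunk is truncated):

	/*
	 * Hand the ilock to xfs_iomap_write_direct() in shared mode; it
	 * drops the lock before returning, so nothing to unlock here.
	 */
	if (lockmode == XFS_ILOCK_EXCL)
		xfs_ilock_demote(ip, lockmode);		/* exclusive -> shared */

	error = xfs_iomap_write_direct(ip, offset, size, &imap, nimaps);
	if (error)
		return error;				/* ilock already released */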

fs/xfs/xfs_file.c

@@ -482,6 +482,8 @@ xfs_zero_eof(
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(offset > isize);
trace_xfs_zero_eof(ip, isize, offset - isize);
/*
* First handle zeroing the block on which isize resides.
*
@@ -574,6 +576,7 @@ xfs_file_aio_write_checks(
struct xfs_inode *ip = XFS_I(inode);
ssize_t error = 0;
size_t count = iov_iter_count(from);
bool drained_dio = false;
restart:
error = generic_write_checks(iocb, from);
@@ -611,12 +614,13 @@ xfs_file_aio_write_checks(
bool zero = false;
spin_unlock(&ip->i_flags_lock);
if (*iolock == XFS_IOLOCK_SHARED) {
xfs_rw_iunlock(ip, *iolock);
*iolock = XFS_IOLOCK_EXCL;
xfs_rw_ilock(ip, *iolock);
iov_iter_reexpand(from, count);
if (!drained_dio) {
if (*iolock == XFS_IOLOCK_SHARED) {
xfs_rw_iunlock(ip, *iolock);
*iolock = XFS_IOLOCK_EXCL;
xfs_rw_ilock(ip, *iolock);
iov_iter_reexpand(from, count);
}
/*
* We now have an IO submission barrier in place, but
* AIO can do EOF updates during IO completion and hence
@@ -626,6 +630,7 @@ xfs_file_aio_write_checks(
* no-op.
*/
inode_dio_wait(inode);
drained_dio = true;
goto restart;
}
error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
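
The xfs_file_aio_write_checks() hunks introduce a drained_dio flag so the extending-write path upgrades the iolock and waits for in-flight direct I/O at most once before falling through to EOF zeroing, rather than restarting indefinitely while AIO completions keep moving i_size. A simplified, kernel-style sketch of the resulting restart flow (condensed from the hunks; the surrounding checks and locking are elided):

	bool	drained_dio = false;

restart:
	/* ... generic_write_checks() and other validation ... */

	if (iocb->ki_pos > i_size_read(inode)) {
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				/* upgrade so no new direct I/O can be submitted */
				xfs_rw_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_rw_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/* wait once for in-flight AIO that may still move EOF */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}
		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
	}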

fs/xfs/xfs_iomap.c

@@ -131,20 +131,29 @@ xfs_iomap_write_direct(
uint qblocks, resblks, resrtextents;
int committed;
int error;
error = xfs_qm_dqattach(ip, 0);
if (error)
return error;
int lockmode;
rt = XFS_IS_REALTIME_INODE(ip);
extsz = xfs_get_extsz_hint(ip);
lockmode = XFS_ILOCK_SHARED; /* locked by caller */
ASSERT(xfs_isilocked(ip, lockmode));
offset_fsb = XFS_B_TO_FSBT(mp, offset);
last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
if ((offset + count) > XFS_ISIZE(ip)) {
/*
* Assert that the in-core extent list is present since this can
* call xfs_iread_extents() and we only have the ilock shared.
* This should be safe because the lock was held around a bmapi
* call in the caller and we only need it to access the in-core
* list.
*/
ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
XFS_IFEXTENTS);
error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
if (error)
return error;
goto out_unlock;
} else {
if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
last_fsb = MIN(last_fsb, (xfs_fileoff_t)
@@ -173,6 +182,15 @@ xfs_iomap_write_direct(
quota_flag = XFS_QMOPT_RES_REGBLKS;
}
/*
* Drop the shared lock acquired by the caller, attach the dquot if
* necessary and move on to transaction setup.
*/
xfs_iunlock(ip, lockmode);
error = xfs_qm_dqattach(ip, 0);
if (error)
return error;
/*
* Allocate and setup the transaction
*/
@@ -187,7 +205,8 @@ xfs_iomap_write_direct(
return error;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
lockmode = XFS_ILOCK_EXCL;
xfs_ilock(ip, lockmode);
error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
if (error)
@@ -229,7 +248,7 @@ xfs_iomap_write_direct(
error = xfs_alert_fsblock_zero(ip, imap);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_iunlock(ip, lockmode);
return error;
out_bmap_cancel:
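
Taken together, the xfs_iomap_write_direct() hunks change its locking protocol: it now enters with the caller's shared ilock, drops it before dquot attach and transaction setup, then retakes the ilock exclusively for the allocation itself, so the out_unlock path releases whichever mode lockmode currently records. A condensed, kernel-style sketch of that flow (simplified from the hunks; the transaction and allocation details are elided):

	int	lockmode = XFS_ILOCK_SHARED;	/* locked by the caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	/* ... size and extent alignment work under the shared ilock ... */

	/* drop the caller's shared lock before operations that can block */
	xfs_iunlock(ip, lockmode);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	/* ... allocate and set up the transaction ... */

	/* relock exclusively for the actual block allocation */
	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	/* ... quota reservation and block allocation ... */

out_unlock:
	xfs_iunlock(ip, lockmode);	/* shared or exclusive, per lockmode */
	return error;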

fs/xfs/xfs_pnfs.c

@@ -181,6 +181,11 @@ xfs_fs_map_blocks(
ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) {
/*
* xfs_iomap_write_direct() expects to take ownership of
* the shared ilock.
*/
xfs_ilock(ip, XFS_ILOCK_SHARED);
error = xfs_iomap_write_direct(ip, offset, length,
&imap, nimaps);
if (error)
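
The xfs_fs_map_blocks() hunk shows the pNFS caller adapting to the same contract: it takes the ilock shared itself and lets xfs_iomap_write_direct() consume it. A minimal caller-side sketch (condensed from the hunk; the error path is truncated in the diff and not shown here):

	/*
	 * xfs_iomap_write_direct() expects to be handed the shared ilock
	 * and drops it internally, so there is no matching xfs_iunlock()
	 * in this caller regardless of the return value.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_iomap_write_direct(ip, offset, length, &imap, nimaps);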

fs/xfs/xfs_trace.h

@@ -1312,6 +1312,7 @@ DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
DEFINE_SIMPLE_IO_EVENT(xfs_zero_eof);
DECLARE_EVENT_CLASS(xfs_itrunc_class,
TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),