mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-02 23:27:06 +00:00
xfs: avoid direct I/O write vs buffered I/O race
Currently a buffered reader or writer can add pages to the pagecache while we are waiting for the iolock in xfs_file_dio_aio_write. Prevent this by re-checking mapping->nrpages after we got the iolock, and if necessary upgrade the lock to exclusive mode. To simplify this a bit only take the ilock inside of xfs_file_aio_write_checks. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Alex Elder <aelder@sgi.com>
This commit is contained in:
parent
859f57ca00
commit
c58cb165bd
1 changed file with 14 additions and 3 deletions
|
@ -676,6 +676,7 @@ xfs_file_aio_write_checks(
|
||||||
xfs_fsize_t new_size;
|
xfs_fsize_t new_size;
|
||||||
int error = 0;
|
int error = 0;
|
||||||
|
|
||||||
|
xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
|
||||||
*new_sizep = 0;
|
*new_sizep = 0;
|
||||||
restart:
|
restart:
|
||||||
error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
|
error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
|
||||||
|
@ -798,14 +799,24 @@ xfs_file_dio_aio_write(
|
||||||
*iolock = XFS_IOLOCK_EXCL;
|
*iolock = XFS_IOLOCK_EXCL;
|
||||||
else
|
else
|
||||||
*iolock = XFS_IOLOCK_SHARED;
|
*iolock = XFS_IOLOCK_SHARED;
|
||||||
xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
|
xfs_rw_ilock(ip, *iolock);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Recheck if there are cached pages that need invalidate after we got
|
||||||
|
* the iolock to protect against other threads adding new pages while
|
||||||
|
* we were waiting for the iolock.
|
||||||
|
*/
|
||||||
|
if (mapping->nrpages && *iolock == XFS_IOLOCK_SHARED) {
|
||||||
|
xfs_rw_iunlock(ip, *iolock);
|
||||||
|
*iolock = XFS_IOLOCK_EXCL;
|
||||||
|
xfs_rw_ilock(ip, *iolock);
|
||||||
|
}
|
||||||
|
|
||||||
ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock);
|
ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
if (mapping->nrpages) {
|
if (mapping->nrpages) {
|
||||||
WARN_ON(*iolock != XFS_IOLOCK_EXCL);
|
|
||||||
ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
|
ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
|
||||||
FI_REMAPF_LOCKED);
|
FI_REMAPF_LOCKED);
|
||||||
if (ret)
|
if (ret)
|
||||||
|
@ -851,7 +862,7 @@ xfs_file_buffered_aio_write(
|
||||||
size_t count = ocount;
|
size_t count = ocount;
|
||||||
|
|
||||||
*iolock = XFS_IOLOCK_EXCL;
|
*iolock = XFS_IOLOCK_EXCL;
|
||||||
xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
|
xfs_rw_ilock(ip, *iolock);
|
||||||
|
|
||||||
ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock);
|
ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock);
|
||||||
if (ret)
|
if (ret)
|
||||||
|
|
Loading…
Reference in a new issue