xfs: cleanup data end I/O handlers

Currently we have different end I/O handlers for reads vs. the different
types of write I/O.  But they are all very similar, so we can just
use one with a few conditionals and reduce code size a lot.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
Author: Christoph Hellwig <hch@lst.de>
Date: 2009-10-30 09:11:47 +00:00
Committed by: Alex Elder <aelder@sgi.com>
parent 06342cf8ad
commit 5ec4fabb02

@@ -235,71 +235,36 @@ xfs_setfilesize(
 }
 
 /*
- * Buffered IO write completion for delayed allocate extents.
+ * IO write completion.
  */
 STATIC void
-xfs_end_bio_delalloc(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * Buffered IO write completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_written(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO write completion for unwritten extents.
- *
- * Issue transactions to convert a buffer range from unwritten
- * to written extents.
- */
-STATIC void
-xfs_end_bio_unwritten(
+xfs_end_io(
 	struct work_struct	*work)
 {
 	xfs_ioend_t		*ioend =
 		container_of(work, xfs_ioend_t, io_work);
 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
-	xfs_off_t		offset = ioend->io_offset;
-	size_t			size = ioend->io_size;
 
-	if (likely(!ioend->io_error)) {
-		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-			int error;
-			error = xfs_iomap_write_unwritten(ip, offset, size);
-			if (error)
-				ioend->io_error = error;
-		}
-		xfs_setfilesize(ioend);
-	}
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO read completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_read(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
+	/*
+	 * For unwritten extents we need to issue transactions to convert a
+	 * range to normal written extens after the data I/O has finished.
+	 */
+	if (ioend->io_type == IOMAP_UNWRITTEN &&
+	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
+		int error;
+
+		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
+						  ioend->io_size);
+		if (error)
+			ioend->io_error = error;
+	}
+
+	/*
+	 * We might have to update the on-disk file size after extending
+	 * writes.
+	 */
+	if (ioend->io_type != IOMAP_READ)
+		xfs_setfilesize(ioend);
 	xfs_destroy_ioend(ioend);
 }
 
@@ -314,10 +279,10 @@ xfs_finish_ioend(
 	int			wait)
 {
 	if (atomic_dec_and_test(&ioend->io_remaining)) {
-		struct workqueue_struct *wq = xfsdatad_workqueue;
-		if (ioend->io_work.func == xfs_end_bio_unwritten)
-			wq = xfsconvertd_workqueue;
+		struct workqueue_struct *wq;
 
+		wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+			xfsconvertd_workqueue : xfsdatad_workqueue;
 		queue_work(wq, &ioend->io_work);
 		if (wait)
 			flush_workqueue(wq);
@@ -355,15 +320,7 @@ xfs_alloc_ioend(
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
 
-	if (type == IOMAP_UNWRITTEN)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
-	else if (type == IOMAP_DELAY)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
-	else if (type == IOMAP_READ)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
-	else
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
+	INIT_WORK(&ioend->io_work, xfs_end_io);
 
 	return ioend;
 }
@@ -1538,7 +1495,7 @@ xfs_end_io_direct(
 	 * didn't map an unwritten extent so switch it's completion
 	 * handler.
 	 */
-	INIT_WORK(&ioend->io_work, xfs_end_bio_written);
+	ioend->io_type = IOMAP_NEW;
 	xfs_finish_ioend(ioend, 0);
 }
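
For readers skimming the change rather than the full diff, the shape of the
cleanup is: one completion handler that branches on the ioend's io_type field
instead of four separate work functions.  The sketch below is a stand-alone
illustration of that pattern, not XFS source; every name in it (demo_ioend,
demo_end_io, DEMO_*) is invented for the example, and the two helpers merely
stand in for xfs_iomap_write_unwritten() and xfs_setfilesize().

#include <stdio.h>

/* I/O types, playing the role of IOMAP_READ/IOMAP_UNWRITTEN/etc. */
enum demo_io_type { DEMO_READ, DEMO_NEW, DEMO_DELALLOC, DEMO_UNWRITTEN };

struct demo_ioend {
	enum demo_io_type	io_type;
	int			io_error;
	long long		io_offset;
	long long		io_size;
};

/* stand-in for xfs_iomap_write_unwritten(): convert the range to written */
static int demo_convert_unwritten(struct demo_ioend *ioend)
{
	printf("convert [%lld, +%lld) from unwritten to written\n",
	       ioend->io_offset, ioend->io_size);
	return 0;
}

/* stand-in for xfs_setfilesize(): push out the new on-disk file size */
static void demo_setfilesize(struct demo_ioend *ioend)
{
	printf("update on-disk size to cover offset %lld\n",
	       ioend->io_offset + ioend->io_size);
}

/* single end-I/O handler: the two branches replace four separate handlers */
static void demo_end_io(struct demo_ioend *ioend)
{
	/* only unwritten extents need a conversion step */
	if (ioend->io_type == DEMO_UNWRITTEN && !ioend->io_error)
		ioend->io_error = demo_convert_unwritten(ioend);

	/* only writes can move the on-disk file size forward */
	if (ioend->io_type != DEMO_READ)
		demo_setfilesize(ioend);

	/* teardown of the ioend would go here */
}

int main(void)
{
	struct demo_ioend unwritten_write = { DEMO_UNWRITTEN, 0, 4096, 8192 };
	struct demo_ioend read_end = { DEMO_READ, 0, 0, 4096 };

	demo_end_io(&unwritten_write);	/* converts, then updates size */
	demo_end_io(&read_end);		/* neither conversion nor size update */
	return 0;
}

The same keying-off-io_type idea is what lets xfs_finish_ioend() above pick
the workqueue from ioend->io_type rather than comparing the work function
pointer, and lets xfs_end_io_direct() simply retag the ioend as IOMAP_NEW
instead of swapping in a different handler.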