xfs: factor out forced iclog flushes

We force iclogs in several places - we need them all to have the
same cache flush semantics, so start by factoring out the iclog
force into a common helper.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
This commit is contained in:
Dave Chinner 2021-07-27 16:23:48 -07:00 committed by Darrick J. Wong
parent 0dc8f7f139
commit 45eddb4140
1 changed file with 18 additions and 24 deletions

View File

@ -778,6 +778,20 @@ xfs_log_mount_cancel(
xfs_log_unmount(mp);
}
/*
* Flush out the iclog to disk ensuring that device caches are flushed and
* the iclog hits stable storage before any completion waiters are woken.
*/
static inline int
xlog_force_iclog(
struct xlog_in_core *iclog)
{
/* Pin the iclog so it cannot be released out from under us. */
atomic_inc(&iclog->ic_refcnt);
/* Only an ACTIVE iclog needs switching; WANT_SYNC/IOERROR states are
 * already on their way to (or past) submission. */
if (iclog->ic_state == XLOG_STATE_ACTIVE)
xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
/* Drop our reference; this may submit the iclog for IO. Returns the
 * release status so callers can propagate shutdown errors. */
return xlog_state_release_iclog(iclog->ic_log, iclog, 0);
}
/*
* Wait for the iclog and all prior iclogs to be written disk as required by the
* log force state machine. Waiting on ic_force_wait ensures iclog completions
@ -863,18 +877,8 @@ out_err:
spin_lock(&log->l_icloglock);
iclog = log->l_iclog;
atomic_inc(&iclog->ic_refcnt);
if (iclog->ic_state == XLOG_STATE_ACTIVE)
xlog_state_switch_iclogs(log, iclog, 0);
else
ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
iclog->ic_state == XLOG_STATE_IOERROR);
/*
* Ensure the journal is fully flushed and on stable storage once the
* iclog containing the unmount record is written.
*/
iclog->ic_flags |= (XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
error = xlog_state_release_iclog(log, iclog, 0);
error = xlog_force_iclog(iclog);
xlog_wait_on_iclog(iclog);
if (tic) {
@ -3201,17 +3205,9 @@ xfs_log_force(
iclog = iclog->ic_prev;
} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
if (atomic_read(&iclog->ic_refcnt) == 0) {
/*
* We are the only one with access to this iclog.
*
* Flush it out now. There should be a roundoff of zero
* to show that someone has already taken care of the
* roundoff from the previous sync.
*/
atomic_inc(&iclog->ic_refcnt);
/* We have exclusive access to this iclog. */
lsn = be64_to_cpu(iclog->ic_header.h_lsn);
xlog_state_switch_iclogs(log, iclog, 0);
if (xlog_state_release_iclog(log, iclog, 0))
if (xlog_force_iclog(iclog))
goto out_error;
if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
@ -3289,9 +3285,7 @@ xlog_force_lsn(
&log->l_icloglock);
return -EAGAIN;
}
atomic_inc(&iclog->ic_refcnt);
xlog_state_switch_iclogs(log, iclog, 0);
if (xlog_state_release_iclog(log, iclog, 0))
if (xlog_force_iclog(iclog))
goto out_error;
if (log_flushed)
*log_flushed = 1;