xfs: selectively keep sick inodes in memory

It's important that the filesystem retain its memory of sick inodes for
a little while after problems are found so that reports can be collected
about what was wrong.  Don't let inode reclamation free sick inodes
unless we're unmounting or the fs already went down.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
This commit is contained in:
Darrick J. Wong 2021-06-07 09:34:50 -07:00
parent 7975e465af
commit 9492750a8b
1 changed file with 39 additions and 6 deletions

View File

@ -71,10 +71,13 @@ static int xfs_icwalk_ag(struct xfs_perag *pag,
/* Stop scanning after icw_scan_limit inodes. */
#define XFS_ICWALK_FLAG_SCAN_LIMIT (1U << 28)
#define XFS_ICWALK_FLAG_RECLAIM_SICK (1U << 27)
#define XFS_ICWALK_PRIVATE_FLAGS (XFS_ICWALK_FLAG_DROP_UDQUOT | \
XFS_ICWALK_FLAG_DROP_GDQUOT | \
XFS_ICWALK_FLAG_DROP_PDQUOT | \
XFS_ICWALK_FLAG_SCAN_LIMIT)
XFS_ICWALK_FLAG_SCAN_LIMIT | \
XFS_ICWALK_FLAG_RECLAIM_SICK)
/*
* Allocate and initialise an xfs_inode.
@ -910,7 +913,8 @@ xfs_dqrele_all_inodes(
*/
static bool
xfs_reclaim_igrab(
struct xfs_inode *ip)
struct xfs_inode *ip,
struct xfs_eofblocks *eofb)
{
ASSERT(rcu_read_lock_held());
@ -921,6 +925,14 @@ xfs_reclaim_igrab(
spin_unlock(&ip->i_flags_lock);
return false;
}
/* Don't reclaim a sick inode unless the caller asked for it. */
if (ip->i_sick &&
(!eofb || !(eofb->eof_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
spin_unlock(&ip->i_flags_lock);
return false;
}
__xfs_iflags_set(ip, XFS_IRECLAIM);
spin_unlock(&ip->i_flags_lock);
return true;
@ -1021,13 +1033,30 @@ out:
xfs_iflags_clear(ip, XFS_IRECLAIM);
}
/* Reclaim sick inodes if we're unmounting or the fs went down. */
static inline bool
xfs_want_reclaim_sick(
struct xfs_mount *mp)
{
return (mp->m_flags & XFS_MOUNT_UNMOUNTING) ||
(mp->m_flags & XFS_MOUNT_NORECOVERY) ||
XFS_FORCED_SHUTDOWN(mp);
}
/*
 * Synchronously reclaim all reclaimable inodes in the filesystem.
 * Sick inodes are included only when xfs_want_reclaim_sick() says the
 * fs is unmounting, norecovery, or shut down.
 *
 * NOTE(review): this span is a rendered diff; the two xfs_icwalk() calls
 * below appear to be the removed (NULL) and added (&eofb) versions of the
 * same line, not two sequential calls — confirm against the real source.
 */
void
xfs_reclaim_inodes(
struct xfs_mount *mp)
{
struct xfs_eofblocks eofb = {
.eof_flags = 0,
};
/* Opt in to reclaiming sick inodes only during unmount/shutdown. */
if (xfs_want_reclaim_sick(mp))
eofb.eof_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
/* Loop until no per-AG tree carries the reclaim tag, pushing the AIL each pass. */
while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
xfs_ail_push_all_sync(mp->m_ail);
xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &eofb);
}
}
@ -1048,6 +1077,9 @@ xfs_reclaim_inodes_nr(
.icw_scan_limit = nr_to_scan,
};
if (xfs_want_reclaim_sick(mp))
eofb.eof_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
/* kick background reclaimer and push the AIL */
xfs_reclaim_work_queue(mp);
xfs_ail_push_all(mp->m_ail);
@ -1597,7 +1629,8 @@ xfs_blockgc_free_quota(
static inline bool
xfs_icwalk_igrab(
enum xfs_icwalk_goal goal,
struct xfs_inode *ip)
struct xfs_inode *ip,
struct xfs_eofblocks *eofb)
{
switch (goal) {
case XFS_ICWALK_DQRELE:
@ -1605,7 +1638,7 @@ xfs_icwalk_igrab(
case XFS_ICWALK_BLOCKGC:
return xfs_blockgc_igrab(ip);
case XFS_ICWALK_RECLAIM:
return xfs_reclaim_igrab(ip);
return xfs_reclaim_igrab(ip, eofb);
default:
return false;
}
@ -1694,7 +1727,7 @@ restart:
for (i = 0; i < nr_found; i++) {
struct xfs_inode *ip = batch[i];
if (done || !xfs_icwalk_igrab(goal, ip))
if (done || !xfs_icwalk_igrab(goal, ip, eofb))
batch[i] = NULL;
/*