xfs: preserve inode health reports for longer

Merge tag 'fix-inode-health-reports-5.14_2021-06-08' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-5.14-merge2

xfs: preserve inode health reports for longer

This is a quick series to make sure that inode sickness reports stick
around in memory for some amount of time.

v2: rebase to 5.13-rc4
v3: require explicit request to reclaim sick inodes, drop weird icache
    miss interaction with DONTCACHE

* tag 'fix-inode-health-reports-5.14_2021-06-08' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux:
  xfs: selectively keep sick inodes in memory
  xfs: drop IDONTCACHE on inodes when we mark them sick
  xfs: only reset incore inode health state flags when reclaiming an inode
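Before the diffs, here is the gist of the reclaim-side change as a minimal userspace C sketch. Everything below (struct fields, the flag name, the function names) is a simplified stand-in for illustration, not the kernel code: reclaim refuses to grab a sick inode unless the walk explicitly opts in, and the incore health state is reset only when the inode is actually reclaimed.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for the in-core inode; not the kernel structure. */
    struct inode {
            unsigned int sick;      /* sickness flags set by scrub */
            unsigned int checked;   /* which checks have run */
    };

    /* Walk flag, modeled on XFS_ICWALK_FLAG_RECLAIM_SICK. */
    #define WALK_RECLAIM_SICK (1U << 0)

    /* Model of xfs_reclaim_igrab(): skip sick inodes unless asked. */
    static bool reclaim_igrab(struct inode *ip, unsigned int walk_flags)
    {
            if (ip->sick && !(walk_flags & WALK_RECLAIM_SICK))
                    return false;   /* keep the sickness report in memory */
            return true;
    }

    /* Model of xfs_reclaim_inode(): health state is reset only here. */
    static void reclaim_inode(struct inode *ip)
    {
            ip->sick = 0;
            ip->checked = 0;
    }

    int main(void)
    {
            struct inode ip = { .sick = 1, .checked = 1 };

            /* Background reclaim leaves the sick inode (and its report) alone. */
            printf("background grab: %d\n", reclaim_igrab(&ip, 0));

            /* Unmount/shutdown opts in, and only then is the state reset. */
            if (reclaim_igrab(&ip, WALK_RECLAIM_SICK))
                    reclaim_inode(&ip);
            printf("sick after reclaim: %u\n", ip.sick);
            return 0;
    }

In the real series the opt-in happens in xfs_reclaim_inodes() and xfs_reclaim_inodes_nr() via xfs_want_reclaim_sick(), i.e. only at unmount, under norecovery, or after a filesystem shutdown, as the diffs below show.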
Author: Darrick J. Wong
Date:   2021-06-08 09:38:24 -07:00
Commit: 295abff2fb

2 changed files with 50 additions and 9 deletions

--- a/fs/xfs/xfs_health.c
+++ b/fs/xfs/xfs_health.c
@@ -229,6 +229,15 @@ xfs_inode_mark_sick(
         ip->i_sick |= mask;
         ip->i_checked |= mask;
         spin_unlock(&ip->i_flags_lock);
+
+       /*
+        * Keep this inode around so we don't lose the sickness report.  Scrub
+        * grabs inodes with DONTCACHE assuming that most inodes are ok, which
+        * is not the case here.
+        */
+       spin_lock(&VFS_I(ip)->i_lock);
+       VFS_I(ip)->i_state &= ~I_DONTCACHE;
+       spin_unlock(&VFS_I(ip)->i_lock);
 }
 
 /* Mark parts of an inode healed. */
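The hunk above is the whole of the DONTCACHE fix: scrub grabs inodes with DONTCACHE set, so a freshly checked (and possibly sick) inode would otherwise be evicted as soon as the last reference dropped, taking the sickness report with it. As a rough illustration of that eviction rule, here is a minimal userspace C model; the struct and function names are invented stand-ins, not the kernel's VFS API:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Invented stand-in for an in-core inode; not the kernel structure. */
    struct vnode {
            int refcount;
            bool dontcache;         /* evict at last reference drop */
            bool sick;              /* the sickness report lives here */
    };

    /* Model of the last reference drop: DONTCACHE means immediate eviction. */
    static void vnode_put(struct vnode **vpp)
    {
            if (--(*vpp)->refcount == 0 && (*vpp)->dontcache) {
                    free(*vpp);     /* the sickness report dies with it */
                    *vpp = NULL;
            }
    }

    /* Model of xfs_inode_mark_sick(): clearing DONTCACHE keeps the report. */
    static void vnode_mark_sick(struct vnode *vp)
    {
            vp->sick = true;
            vp->dontcache = false;
    }

    int main(void)
    {
            struct vnode *vp = calloc(1, sizeof(*vp));

            vp->refcount = 1;
            vp->dontcache = true;           /* how scrub grabs inodes */
            vnode_mark_sick(vp);
            vnode_put(&vp);
            printf("cached: %d\n", vp != NULL);     /* 1: report survives */
            return 0;
    }

With dontcache left set, the same sequence would free the node and lose the report, which is exactly the behavior the patch removes for sick inodes.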

--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -71,10 +71,13 @@ static int xfs_icwalk_ag(struct xfs_perag *pag,
 /* Stop scanning after icw_scan_limit inodes. */
 #define XFS_ICWALK_FLAG_SCAN_LIMIT      (1U << 28)
 
+#define XFS_ICWALK_FLAG_RECLAIM_SICK    (1U << 27)
+
 #define XFS_ICWALK_PRIVATE_FLAGS        (XFS_ICWALK_FLAG_DROP_UDQUOT | \
                                          XFS_ICWALK_FLAG_DROP_GDQUOT | \
                                          XFS_ICWALK_FLAG_DROP_PDQUOT | \
-                                         XFS_ICWALK_FLAG_SCAN_LIMIT)
+                                         XFS_ICWALK_FLAG_SCAN_LIMIT | \
+                                         XFS_ICWALK_FLAG_RECLAIM_SICK)
 
 /*
  * Allocate and initialise an xfs_inode.
@@ -523,9 +526,6 @@ xfs_iget_cache_hit(
                                 XFS_INO_TO_AGINO(pag->pag_mount, ino),
                                 XFS_ICI_RECLAIM_TAG);
                 inode->i_state = I_NEW;
-               ip->i_sick = 0;
-               ip->i_checked = 0;
-
                 spin_unlock(&ip->i_flags_lock);
                 spin_unlock(&pag->pag_ici_lock);
         } else {
@@ -913,7 +913,8 @@ xfs_dqrele_all_inodes(
  */
 static bool
 xfs_reclaim_igrab(
-       struct xfs_inode        *ip)
+       struct xfs_inode        *ip,
+       struct xfs_eofblocks    *eofb)
 {
         ASSERT(rcu_read_lock_held());
 
@@ -924,6 +925,14 @@
                 spin_unlock(&ip->i_flags_lock);
                 return false;
         }
+
+       /* Don't reclaim a sick inode unless the caller asked for it. */
+       if (ip->i_sick &&
+           (!eofb || !(eofb->eof_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
+               spin_unlock(&ip->i_flags_lock);
+               return false;
+       }
+
         __xfs_iflags_set(ip, XFS_IRECLAIM);
         spin_unlock(&ip->i_flags_lock);
         return true;
@@ -979,6 +988,8 @@ xfs_reclaim_inode(
         spin_lock(&ip->i_flags_lock);
         ip->i_flags = XFS_IRECLAIM;
         ip->i_ino = 0;
+       ip->i_sick = 0;
+       ip->i_checked = 0;
         spin_unlock(&ip->i_flags_lock);
 
         xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -1022,13 +1033,30 @@ xfs_reclaim_inode(
         xfs_iflags_clear(ip, XFS_IRECLAIM);
 }
 
+/* Reclaim sick inodes if we're unmounting or the fs went down. */
+static inline bool
+xfs_want_reclaim_sick(
+       struct xfs_mount        *mp)
+{
+       return (mp->m_flags & XFS_MOUNT_UNMOUNTING) ||
+              (mp->m_flags & XFS_MOUNT_NORECOVERY) ||
+              XFS_FORCED_SHUTDOWN(mp);
+}
+
 void
 xfs_reclaim_inodes(
         struct xfs_mount        *mp)
 {
+       struct xfs_eofblocks    eofb = {
+               .eof_flags      = 0,
+       };
+
+       if (xfs_want_reclaim_sick(mp))
+               eofb.eof_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
+
         while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
                 xfs_ail_push_all_sync(mp->m_ail);
-               xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
+               xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &eofb);
         }
 }
 
@@ -1049,6 +1077,9 @@ xfs_reclaim_inodes_nr(
                 .icw_scan_limit = nr_to_scan,
         };
 
+       if (xfs_want_reclaim_sick(mp))
+               eofb.eof_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
+
         /* kick background reclaimer and push the AIL */
         xfs_reclaim_work_queue(mp);
         xfs_ail_push_all(mp->m_ail);
@@ -1598,7 +1629,8 @@ xfs_blockgc_free_quota(
 static inline bool
 xfs_icwalk_igrab(
         enum xfs_icwalk_goal    goal,
-       struct xfs_inode        *ip)
+       struct xfs_inode        *ip,
+       struct xfs_eofblocks    *eofb)
 {
         switch (goal) {
         case XFS_ICWALK_DQRELE:
@@ -1606,7 +1638,7 @@
         case XFS_ICWALK_BLOCKGC:
                 return xfs_blockgc_igrab(ip);
         case XFS_ICWALK_RECLAIM:
-               return xfs_reclaim_igrab(ip);
+               return xfs_reclaim_igrab(ip, eofb);
         default:
                 return false;
         }
@@ -1695,7 +1727,7 @@ xfs_icwalk_ag(
         for (i = 0; i < nr_found; i++) {
                 struct xfs_inode        *ip = batch[i];
 
-               if (done || !xfs_icwalk_igrab(goal, ip))
+               if (done || !xfs_icwalk_igrab(goal, ip, eofb))
                         batch[i] = NULL;
 
                 /*