linux-stable/fs/xfs/scrub/common.h

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#ifndef __XFS_SCRUB_COMMON_H__
#define __XFS_SCRUB_COMMON_H__

/*
 * We /could/ terminate a scrub/repair operation early.  If we're not
 * in a good place to continue (fatal signal, etc.) then bail out.
 * Note that we're careful not to make any judgements about *error.
 */
static inline bool
xchk_should_terminate(
	struct xfs_scrub	*sc,
	int			*error)
{
	/*
	 * If preemption is disabled, we need to yield to the scheduler every
	 * few seconds so that we don't run afoul of the soft lockup watchdog
	 * or RCU stall detector.
	 */
	cond_resched();

	if (fatal_signal_pending(current)) {
		if (*error == 0)
			*error = -EINTR;
		return true;
	}
	return false;
}
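
/*
 * Usage sketch (illustrative, not part of this header): a scrubber that
 * walks many records would typically poll for termination once per
 * iteration, letting -EINTR propagate through its error return.  The
 * helpers next_rec and check_one_rec here are hypothetical:
 *
 *	while (next_rec(sc, &rec)) {
 *		if (xchk_should_terminate(sc, &error))
 *			break;
 *		error = check_one_rec(sc, &rec);
 *		if (error)
 *			break;
 *	}
 *	return error;
 */
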
int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks);
void xchk_trans_cancel(struct xfs_scrub *sc);

bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno,
		xfs_agblock_t bno, int *error);
bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset, int *error);

bool xchk_xref_process_error(struct xfs_scrub *sc,
		xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
bool xchk_fblock_xref_process_error(struct xfs_scrub *sc,
		int whichfork, xfs_fileoff_t offset, int *error);

void xchk_block_set_preen(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);

void xchk_set_corrupt(struct xfs_scrub *sc);
void xchk_block_set_corrupt(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino);
void xchk_fblock_set_corrupt(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset);

void xchk_block_xref_set_corrupt(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_xref_set_corrupt(struct xfs_scrub *sc,
		xfs_ino_t ino);
void xchk_fblock_xref_set_corrupt(struct xfs_scrub *sc,
		int whichfork, xfs_fileoff_t offset);

void xchk_ino_set_warning(struct xfs_scrub *sc, xfs_ino_t ino);
void xchk_fblock_set_warning(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset);

void xchk_set_incomplete(struct xfs_scrub *sc);
int xchk_checkpoint_log(struct xfs_mount *mp);

/* Are we set up for a cross-referencing check? */
bool xchk_should_check_xref(struct xfs_scrub *sc, int *error,
		struct xfs_btree_cur **curpp);
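
/*
 * Typical xref pattern (a sketch, modeled on an rmap cross-check):
 * xchk_should_check_xref() decides whether the cursor is usable and records
 * a cross-referencing failure in *error if it is not.
 *
 *	bool			has_rmap;
 *
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 *		return;
 *	error = xfs_rmap_record_exists(sc->sa.rmap_cur, bno, len, oinfo,
 *			&has_rmap);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 *		return;
 *	if (!has_rmap)
 *		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
 */
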
/* Setup functions */
int xchk_setup_agheader(struct xfs_scrub *sc);
int xchk_setup_fs(struct xfs_scrub *sc);
int xchk_setup_ag_allocbt(struct xfs_scrub *sc);
int xchk_setup_ag_iallocbt(struct xfs_scrub *sc);
int xchk_setup_ag_rmapbt(struct xfs_scrub *sc);
int xchk_setup_ag_refcountbt(struct xfs_scrub *sc);
int xchk_setup_inode(struct xfs_scrub *sc);
int xchk_setup_inode_bmap(struct xfs_scrub *sc);
int xchk_setup_inode_bmap_data(struct xfs_scrub *sc);
int xchk_setup_directory(struct xfs_scrub *sc);
int xchk_setup_xattr(struct xfs_scrub *sc);
int xchk_setup_symlink(struct xfs_scrub *sc);
int xchk_setup_parent(struct xfs_scrub *sc);
#ifdef CONFIG_XFS_RT
int xchk_setup_rt(struct xfs_scrub *sc);
#else
static inline int
xchk_setup_rt(struct xfs_scrub *sc)
{
	return -ENOENT;
}
#endif
#ifdef CONFIG_XFS_QUOTA
int xchk_setup_quota(struct xfs_scrub *sc);
#else
static inline int
xchk_setup_quota(struct xfs_scrub *sc)
{
	return -ENOENT;
}
#endif
int xchk_setup_fscounters(struct xfs_scrub *sc);
void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa);
int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
		struct xchk_ag *sa);

/*
 * Grab all AG resources, treating the inability to grab the perag structure as
 * a fs corruption.  This is intended for callers checking an ondisk reference
 * to a given AG, which means that the AG must still exist.
 */
static inline int
xchk_ag_init_existing(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	int			error = xchk_ag_init(sc, agno, sa);

	return error == -ENOENT ? -EFSCORRUPTED : error;
}
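
/*
 * For example (an illustrative sketch): a checker that found an ondisk
 * reference to AG 'agno' might set up with
 *
 *	error = xchk_ag_init_existing(sc, agno, &sc->sa);
 *	if (!xchk_process_error(sc, agno, 0, &error))
 *		return error;
 *
 * so that a missing perag structure is reported as corruption rather than
 * as a plain lookup failure.
 */
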
int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
		struct xchk_ag *sa);
void xchk_ag_btcur_free(struct xchk_ag *sa);
void xchk_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
		const struct xfs_owner_info *oinfo, xfs_filblks_t *blocks);

int xchk_setup_ag_btree(struct xfs_scrub *sc, bool force_log);
int xchk_iget_for_scrubbing(struct xfs_scrub *sc);
int xchk_setup_inode_contents(struct xfs_scrub *sc, unsigned int resblks);
void xchk_buffer_recheck(struct xfs_scrub *sc, struct xfs_buf *bp);

int xchk_iget(struct xfs_scrub *sc, xfs_ino_t inum, struct xfs_inode **ipp);
int xchk_iget_agi(struct xfs_scrub *sc, xfs_ino_t inum,
		struct xfs_buf **agi_bpp, struct xfs_inode **ipp);
void xchk_irele(struct xfs_scrub *sc, struct xfs_inode *ip);
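
/*
 * Sketch of the iget pattern (illustrative): xchk_iget grabs an inode for a
 * scrubber that operates on an incore inode; xchk_iget_agi additionally
 * returns the locked AGI buffer so that no other thread can allocate or free
 * inodes while the caller examines the inode record.  Either way, release
 * the inode with xchk_irele:
 *
 *	error = xchk_iget(sc, inum, &ip);
 *	if (error)
 *		return error;
 *	// ...examine ip...
 *	xchk_irele(sc, ip);
 */
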
int xchk_install_handle_inode(struct xfs_scrub *sc, struct xfs_inode *ip);

/*
 * Don't bother cross-referencing if we already found corruption or cross
 * referencing discrepancies.
 */
static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
{
	return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
			       XFS_SCRUB_OFLAG_XCORRUPT);
}
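
/*
 * A cross-referencing helper would typically bail out early (sketch):
 *
 *	if (xchk_skip_xref(sc->sm))
 *		return;
 */
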
int xchk_metadata_inode_forks(struct xfs_scrub *sc);

/*
 * Setting up a hook to wait for intents to drain is costly -- we have to take
 * the CPU hotplug lock and force an i-cache flush on all CPUs once to set it
 * up, and again to tear it down.  These costs add up quickly, so we only want
 * to enable the drain waiter if the drain actually detected a conflict with
 * running intent chains.
 */
static inline bool xchk_need_intent_drain(struct xfs_scrub *sc)
{
	return sc->flags & XCHK_NEED_DRAIN;
}
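
/*
 * Hedged sketch of the retry flow this enables: a scrubber that collides
 * with running intent chains sets XCHK_NEED_DRAIN and returns -EDEADLOCK;
 * before retrying, the scrub core can consult this predicate to decide
 * whether the (expensive) drain gate is actually needed, e.g.:
 *
 *	if (xchk_need_intent_drain(sc))
 *		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
 */
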
void xchk_fsgates_enable(struct xfs_scrub *sc, unsigned int scrub_fshooks);
#endif /* __XFS_SCRUB_COMMON_H__ */