xfs: give xfs_rmap_intent its own perag reference

Give the xfs_rmap_intent a passive reference to the perag structure
data.  This reference will be used to enable scrub intent draining
functionality in subsequent patches.  The space we're (reverse) mapping
is already allocated, so we need to be able to operate even if the AG is
being shrunk or offlined.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
commit c13418e8eb (parent f6b384631e)
Author: Darrick J. Wong, 2023-04-11 18:59:54 -07:00
3 changed files with 44 additions and 21 deletions
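As an aside for readers, here is a minimal illustrative sketch (not part of the patch) of the lifecycle the new helpers establish for the perag reference carried by an xfs_rmap_intent; it uses only the calls that appear in the diffs below:

/*
 * Illustrative sketch only: how the passive perag reference is paired up
 * over the life of a deferred rmap intent.
 */

/* When the intent is queued (__xfs_rmap_add): pin the AG passively. */
xfs_rmap_update_get_group(tp->t_mountp, ri);
xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);

/* When the intent is finished or cancelled: drop the reference, then free. */
xfs_rmap_update_put_group(ri);
kmem_cache_free(xfs_rmap_intent_cache, ri);

Because the reference is passive (xfs_perag_get/xfs_perag_put), it keeps the perag structure itself alive without blocking an AG shrink or offline, which is what the commit message calls for: the space being reverse-mapped is already allocated, so the intent must remain usable even while the AG is going away.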

fs/xfs/libxfs/xfs_rmap.c

@@ -2394,7 +2394,6 @@ xfs_rmap_finish_one(
 	struct xfs_btree_cur	**pcur)
 {
 	struct xfs_mount	*mp = tp->t_mountp;
-	struct xfs_perag	*pag;
 	struct xfs_btree_cur	*rcur;
 	struct xfs_buf		*agbp = NULL;
 	int			error = 0;
@@ -2402,26 +2401,22 @@ xfs_rmap_finish_one(
 	xfs_agblock_t		bno;
 	bool			unwritten;
 
-	pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ri->ri_bmap.br_startblock));
 	bno = XFS_FSB_TO_AGBNO(mp, ri->ri_bmap.br_startblock);
 
-	trace_xfs_rmap_deferred(mp, pag->pag_agno, ri->ri_type, bno,
+	trace_xfs_rmap_deferred(mp, ri->ri_pag->pag_agno, ri->ri_type, bno,
 			ri->ri_owner, ri->ri_whichfork,
 			ri->ri_bmap.br_startoff, ri->ri_bmap.br_blockcount,
 			ri->ri_bmap.br_state);
 
-	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_RMAP_FINISH_ONE)) {
-		error = -EIO;
-		goto out_drop;
-	}
+	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_RMAP_FINISH_ONE))
+		return -EIO;
 
 	/*
	 * If we haven't gotten a cursor or the cursor AG doesn't match
	 * the startblock, get one now.
	 */
 	rcur = *pcur;
-	if (rcur != NULL && rcur->bc_ag.pag != pag) {
+	if (rcur != NULL && rcur->bc_ag.pag != ri->ri_pag) {
 		xfs_rmap_finish_one_cleanup(tp, rcur, 0);
 		rcur = NULL;
 		*pcur = NULL;
@@ -2432,15 +2427,13 @@ xfs_rmap_finish_one(
 		 * rmapbt, because a shape change could cause us to
 		 * allocate blocks.
 		 */
-		error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
+		error = xfs_free_extent_fix_freelist(tp, ri->ri_pag, &agbp);
 		if (error)
-			goto out_drop;
-		if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
-			error = -EFSCORRUPTED;
-			goto out_drop;
-		}
+			return error;
+		if (XFS_IS_CORRUPT(tp->t_mountp, !agbp))
+			return -EFSCORRUPTED;
 
-		rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
+		rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, ri->ri_pag);
 	}
 
 	*pcur = rcur;
@@ -2480,8 +2473,7 @@ xfs_rmap_finish_one(
 		ASSERT(0);
 		error = -EFSCORRUPTED;
 	}
-out_drop:
-	xfs_perag_put(pag);
+
 	return error;
 }
@@ -2526,6 +2518,7 @@ __xfs_rmap_add(
 	ri->ri_whichfork = whichfork;
 	ri->ri_bmap = *bmap;
 
+	xfs_rmap_update_get_group(tp->t_mountp, ri);
 	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
 }

fs/xfs/libxfs/xfs_rmap.h

@@ -162,8 +162,12 @@ struct xfs_rmap_intent {
 	int				ri_whichfork;
 	uint64_t			ri_owner;
 	struct xfs_bmbt_irec		ri_bmap;
+	struct xfs_perag		*ri_pag;
 };
 
+void xfs_rmap_update_get_group(struct xfs_mount *mp,
+		struct xfs_rmap_intent *ri);
+
 /* functions for updating the rmapbt based on bmbt map/unmap operations */
 void xfs_rmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip,
 		int whichfork, struct xfs_bmbt_irec *imap);

fs/xfs/xfs_rmap_item.c

@@ -20,6 +20,7 @@
 #include "xfs_error.h"
 #include "xfs_log_priv.h"
 #include "xfs_log_recover.h"
+#include "xfs_ag.h"
 
 struct kmem_cache	*xfs_rui_cache;
 struct kmem_cache	*xfs_rud_cache;
@@ -320,14 +321,13 @@ xfs_rmap_update_diff_items(
 	const struct list_head		*a,
 	const struct list_head		*b)
 {
-	struct xfs_mount		*mp = priv;
 	struct xfs_rmap_intent		*ra;
 	struct xfs_rmap_intent		*rb;
 
 	ra = container_of(a, struct xfs_rmap_intent, ri_list);
 	rb = container_of(b, struct xfs_rmap_intent, ri_list);
 
-	return	XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
-		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
+	return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
 }
 
 /* Log rmap updates in the intent item. */
@@ -390,6 +390,26 @@ xfs_rmap_update_create_done(
 	return &xfs_trans_get_rud(tp, RUI_ITEM(intent))->rud_item;
 }
 
+/* Take a passive ref to the AG containing the space we're rmapping. */
+void
+xfs_rmap_update_get_group(
+	struct xfs_mount	*mp,
+	struct xfs_rmap_intent	*ri)
+{
+	xfs_agnumber_t		agno;
+
+	agno = XFS_FSB_TO_AGNO(mp, ri->ri_bmap.br_startblock);
+	ri->ri_pag = xfs_perag_get(mp, agno);
+}
+
+/* Release a passive AG ref after finishing rmapping work. */
+static inline void
+xfs_rmap_update_put_group(
+	struct xfs_rmap_intent	*ri)
+{
+	xfs_perag_put(ri->ri_pag);
+}
+
 /* Process a deferred rmap update. */
 STATIC int
 xfs_rmap_update_finish_item(
@@ -405,6 +425,8 @@ xfs_rmap_update_finish_item(
 
 	error = xfs_trans_log_finish_rmap_update(tp, RUD_ITEM(done), ri,
 			state);
+
+	xfs_rmap_update_put_group(ri);
 	kmem_cache_free(xfs_rmap_intent_cache, ri);
 	return error;
 }
@@ -425,6 +447,8 @@ xfs_rmap_update_cancel_item(
 	struct xfs_rmap_intent		*ri;
 
 	ri = container_of(item, struct xfs_rmap_intent, ri_list);
+
+	xfs_rmap_update_put_group(ri);
 	kmem_cache_free(xfs_rmap_intent_cache, ri);
 }
@@ -559,11 +583,13 @@ xfs_rui_item_recover(
 		fake.ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
 				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
 
+		xfs_rmap_update_get_group(mp, &fake);
 		error = xfs_trans_log_finish_rmap_update(tp, rudp, &fake,
 				&rcur);
 		if (error == -EFSCORRUPTED)
 			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
 					map, sizeof(*map));
+		xfs_rmap_update_put_group(&fake);
 		if (error)
 			goto abort_error;