linux-stable/fs/xfs/xfs_bmap_util.c


// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
* Copyright (c) 2012 Red Hat, Inc.
* All Rights Reserved.
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
/* Kernel only BMAP related definitions and functions */
/*
* Convert the given file system block to a disk block. We have to treat it
* differently based on whether the file is a real time file or not, because the
* bmap code does.
*/
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
if (XFS_IS_REALTIME_INODE(ip))
return XFS_FSB_TO_BB(ip->i_mount, fsb);
return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}
/*
* Routine to zero an extent on disk allocated to the specific inode.
*
* The VFS functions take a linearised filesystem block offset, so we have to
* convert the sparse xfs fsb to the right format first.
* VFS types are real funky, too.
*/
int
xfs_zero_extent(
struct xfs_inode *ip,
xfs_fsblock_t start_fsb,
xfs_off_t count_fsb)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_buftarg *target = xfs_inode_buftarg(ip);
xfs_daddr_t sector = xfs_fsb_to_db(ip, start_fsb);
sector_t block = XFS_BB_TO_FSBT(mp, sector);
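/*
 * blkdev_issue_zeroout() works in 512-byte sectors, so the filesystem
 * block offset and count are converted by shifting left by
 * (s_blocksize_bits - 9). For example, with 4 KiB blocks the shift is
 * 3, i.e. 8 sectors per filesystem block.
 */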
return blkdev_issue_zeroout(target->bt_bdev,
block << (mp->m_super->s_blocksize_bits - 9),
count_fsb << (mp->m_super->s_blocksize_bits - 9),
GFP_NOFS, 0);
}
#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
struct xfs_bmalloca *ap)
{
struct xfs_mount *mp = ap->ip->i_mount;
xfs_fileoff_t orig_offset = ap->offset;
xfs_rtblock_t rtb;
xfs_extlen_t prod = 0; /* product factor for allocators */
xfs_extlen_t mod = 0; /* alignment remainder */
xfs_extlen_t ralen = 0; /* realtime allocation length */
xfs_extlen_t align; /* minimum allocation alignment */
xfs_extlen_t orig_length = ap->length;
xfs_extlen_t minlen = mp->m_sb.sb_rextsize;
xfs_extlen_t raminlen;
bool rtlocked = false;
bool ignore_locality = false;
int error;
align = xfs_get_extsz_hint(ap->ip);
retry:
prod = align / mp->m_sb.sb_rextsize;
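/* prod is the extent size hint converted to whole realtime extents. */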
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
align, 1, ap->eof, 0,
ap->conv, &ap->offset, &ap->length);
if (error)
return error;
ASSERT(ap->length);
ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
/*
* If we shifted the file offset downward to satisfy an extent size
* hint, increase minlen by that amount so that the allocator won't
* give us an allocation that's too short to cover at least one of the
* blocks that the caller asked for.
*/
if (ap->offset != orig_offset)
minlen += orig_offset - ap->offset;
/*
* If the offset & length are not perfectly aligned
* then kill prod, it will just get us in trouble.
*/
div_u64_rem(ap->offset, align, &mod);
if (mod || ap->length % align)
prod = 1;
/*
* Set ralen to be the actual requested length in rtextents.
*/
ralen = ap->length / mp->m_sb.sb_rextsize;
/*
* If the old value was close enough to XFS_MAX_BMBT_EXTLEN that
* we rounded up to it, cut it back so it's valid again.
* Note that if it's a really large request (bigger than
* XFS_MAX_BMBT_EXTLEN), we don't hear about that number, and can't
* adjust the starting point to match it.
*/
if (ralen * mp->m_sb.sb_rextsize >= XFS_MAX_BMBT_EXTLEN)
ralen = XFS_MAX_BMBT_EXTLEN / mp->m_sb.sb_rextsize;
/*
* Lock out modifications to both the RT bitmap and summary inodes
*/
if (!rtlocked) {
xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
rtlocked = true;
}
/*
* If it's an allocation to an empty file at offset 0,
* pick an extent that will space things out in the rt area.
*/
if (ap->eof && ap->offset == 0) {
xfs_rtblock_t rtx; /* realtime extent no */
error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
if (error)
return error;
ap->blkno = rtx * mp->m_sb.sb_rextsize;
} else {
ap->blkno = 0;
}
xfs_bmap_adjacent(ap);
/*
* Realtime allocation, done through xfs_rtallocate_extent.
*/
if (ignore_locality)
ap->blkno = 0;
else
do_div(ap->blkno, mp->m_sb.sb_rextsize);
rtb = ap->blkno;
ap->length = ralen;
raminlen = max_t(xfs_extlen_t, 1, minlen / mp->m_sb.sb_rextsize);
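/*
 * At this point ap->blkno, ap->length and raminlen are all in units of
 * realtime extents; xfs_rtallocate_extent() works at that granularity.
 */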
error = xfs_rtallocate_extent(ap->tp, ap->blkno, raminlen, ap->length,
&ralen, ap->wasdel, prod, &rtb);
if (error)
return error;
if (rtb != NULLRTBLOCK) {
ap->blkno = rtb * mp->m_sb.sb_rextsize;
ap->length = ralen * mp->m_sb.sb_rextsize;
ap->ip->i_nblocks += ap->length;
xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
if (ap->wasdel)
ap->ip->i_delayed_blks -= ap->length;
/*
* Adjust the disk quota also. This was reserved
* earlier.
*/
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
XFS_TRANS_DQ_RTBCOUNT, ap->length);
return 0;
}
if (align > mp->m_sb.sb_rextsize) {
/*
* We previously enlarged the request length to try to satisfy
* an extent size hint. The allocator didn't return anything,
* so reset the parameters to the original values and try again
* without alignment criteria.
*/
ap->offset = orig_offset;
ap->length = orig_length;
minlen = align = mp->m_sb.sb_rextsize;
goto retry;
}
if (!ignore_locality && ap->blkno != 0) {
/*
* If we can't allocate near a specific rt extent, try again
* without locality criteria.
*/
ignore_locality = true;
goto retry;
}
ap->blkno = NULLFSBLOCK;
ap->length = 0;
return 0;
}
#endif /* CONFIG_XFS_RT */
/*
* Extent tree block counting routines.
*/
/*
* Count leaf blocks given a range of extent records. Delayed allocation
* extents are not counted towards the totals.
*/
xfs_extnum_t
xfs_bmap_count_leaves(
struct xfs_ifork *ifp,
xfs_filblks_t *count)
{
struct xfs_iext_cursor icur;
struct xfs_bmbt_irec got;
xfs_extnum_t numrecs = 0;
for_each_xfs_iext(ifp, &icur, &got) {
if (!isnullstartblock(got.br_startblock)) {
*count += got.br_blockcount;
numrecs++;
}
}
return numrecs;
}
/*
* Count fsblocks of the given fork. Delayed allocation extents are
* not counted towards the totals.
*/
int
xfs_bmap_count_blocks(
struct xfs_trans *tp,
struct xfs_inode *ip,
int whichfork,
xfs_extnum_t *nextents,
xfs_filblks_t *count)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_btree_cur *cur;
xfs_extlen_t btblocks = 0;
int error;
*nextents = 0;
*count = 0;
if (!ifp)
return 0;
switch (ifp->if_format) {
case XFS_DINODE_FMT_BTREE:
error = xfs_iread_extents(tp, ip, whichfork);
if (error)
return error;
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
error = xfs_btree_count_blocks(cur, &btblocks);
xfs_btree_del_cursor(cur, error);
if (error)
return error;
/*
* xfs_btree_count_blocks includes the root block contained in
* the inode fork in @btblocks, so subtract one because we're
* only interested in allocated disk blocks.
*/
*count += btblocks - 1;
fallthrough;
case XFS_DINODE_FMT_EXTENTS:
*nextents = xfs_bmap_count_leaves(ifp, count);
break;
}
return 0;
}
static int
xfs_getbmap_report_one(
struct xfs_inode *ip,
struct getbmapx *bmv,
struct kgetbmap *out,
int64_t bmv_end,
struct xfs_bmbt_irec *got)
{
struct kgetbmap *p = out + bmv->bmv_entries;
bool shared = false;
int error;
error = xfs_reflink_trim_around_shared(ip, got, &shared);
if (error)
return error;
if (isnullstartblock(got->br_startblock) ||
got->br_startblock == DELAYSTARTBLOCK) {
/*
* Delalloc extents that start beyond EOF can occur due to
* speculative EOF allocation when the delalloc extent is larger
* than the largest freespace extent at conversion time. These
* extents cannot be converted by data writeback, so can exist
* here even if we are not supposed to be finding delalloc
* extents.
*/
if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
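/* Delayed allocation extents are reported with the special block value -2. */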
p->bmv_oflags |= BMV_OF_DELALLOC;
p->bmv_block = -2;
} else {
p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
}
if (got->br_state == XFS_EXT_UNWRITTEN &&
(bmv->bmv_iflags & BMV_IF_PREALLOC))
p->bmv_oflags |= BMV_OF_PREALLOC;
if (shared)
p->bmv_oflags |= BMV_OF_SHARED;
p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
bmv->bmv_offset = p->bmv_offset + p->bmv_length;
bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
bmv->bmv_entries++;
return 0;
}
static void
xfs_getbmap_report_hole(
struct xfs_inode *ip,
struct getbmapx *bmv,
struct kgetbmap *out,
int64_t bmv_end,
xfs_fileoff_t bno,
xfs_fileoff_t end)
{
struct kgetbmap *p = out + bmv->bmv_entries;
if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
return;
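/* Holes are reported with the special block value -1. */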
p->bmv_block = -1;
p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
bmv->bmv_offset = p->bmv_offset + p->bmv_length;
bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
bmv->bmv_entries++;
}
static inline bool
xfs_getbmap_full(
struct getbmapx *bmv)
{
return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}
static bool
xfs_getbmap_next_rec(
struct xfs_bmbt_irec *rec,
xfs_fileoff_t total_end)
{
xfs_fileoff_t end = rec->br_startoff + rec->br_blockcount;
if (end == total_end)
return false;
rec->br_startoff += rec->br_blockcount;
if (!isnullstartblock(rec->br_startblock) &&
rec->br_startblock != DELAYSTARTBLOCK)
rec->br_startblock += rec->br_blockcount;
rec->br_blockcount = total_end - end;
return true;
}
/*
* Get inode's extents as described in bmv, and format for output.
* Calls formatter to fill the user's buffer until all extents
* are mapped, until the passed-in bmv->bmv_count slots have
* been filled, or until the formatter short-circuits the loop,
* if it is tracking filled-in extents on its own.
*/
int /* error code */
xfs_getbmap(
struct xfs_inode *ip,
struct getbmapx *bmv, /* user bmap structure */
struct kgetbmap *out)
{
struct xfs_mount *mp = ip->i_mount;
int iflags = bmv->bmv_iflags;
int whichfork, lock, error = 0;
int64_t bmv_end, max_len;
xfs_fileoff_t bno, first_bno;
struct xfs_ifork *ifp;
struct xfs_bmbt_irec got, rec;
xfs_filblks_t len;
struct xfs_iext_cursor icur;
if (bmv->bmv_iflags & ~BMV_IF_VALID)
return -EINVAL;
#ifndef DEBUG
/* Only allow CoW fork queries if we're debugging. */
if (iflags & BMV_IF_COWFORK)
return -EINVAL;
#endif
if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
return -EINVAL;
if (bmv->bmv_length < -1)
return -EINVAL;
bmv->bmv_entries = 0;
if (bmv->bmv_length == 0)
return 0;
if (iflags & BMV_IF_ATTRFORK)
whichfork = XFS_ATTR_FORK;
else if (iflags & BMV_IF_COWFORK)
whichfork = XFS_COW_FORK;
else
whichfork = XFS_DATA_FORK;
ifp = XFS_IFORK_PTR(ip, whichfork);
xfs_ilock(ip, XFS_IOLOCK_SHARED);
switch (whichfork) {
case XFS_ATTR_FORK:
if (!XFS_IFORK_Q(ip))
goto out_unlock_iolock;
max_len = 1LL << 32;
lock = xfs_ilock_attr_map_shared(ip);
break;
case XFS_COW_FORK:
/* No CoW fork? Just return */
if (!ifp)
goto out_unlock_iolock;
if (xfs_get_cowextsz_hint(ip))
max_len = mp->m_super->s_maxbytes;
else
max_len = XFS_ISIZE(ip);
lock = XFS_ILOCK_SHARED;
xfs_ilock(ip, lock);
break;
case XFS_DATA_FORK:
if (!(iflags & BMV_IF_DELALLOC) &&
(ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
if (error)
goto out_unlock_iolock;
/*
* Even after flushing the inode, there can still be
* delalloc blocks on the inode beyond EOF due to
* speculative preallocation. These are not removed
* until the release function is called or the inode
* is inactivated. Hence we cannot assert here that
* ip->i_delayed_blks == 0.
*/
}
if (xfs_get_extsz_hint(ip) ||
(ip->i_diflags &
(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
max_len = mp->m_super->s_maxbytes;
else
max_len = XFS_ISIZE(ip);
lock = xfs_ilock_data_map_shared(ip);
break;
}
switch (ifp->if_format) {
case XFS_DINODE_FMT_EXTENTS:
case XFS_DINODE_FMT_BTREE:
break;
case XFS_DINODE_FMT_LOCAL:
/* Local format inode forks report no extents. */
goto out_unlock_ilock;
default:
error = -EINVAL;
goto out_unlock_ilock;
}
if (bmv->bmv_length == -1) {
max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
}
bmv_end = bmv->bmv_offset + bmv->bmv_length;
first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
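/*
 * The getbmap interface measures offsets and lengths in 512-byte basic
 * blocks (BBs), while the extent walk below works in filesystem blocks,
 * hence the conversions here and in the report helpers above.
 */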
error = xfs_iread_extents(NULL, ip, whichfork);
if (error)
goto out_unlock_ilock;
if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
/*
* Report a whole-file hole if the delalloc flag is set to
* stay compatible with the old implementation.
*/
if (iflags & BMV_IF_DELALLOC)
xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
goto out_unlock_ilock;
}
while (!xfs_getbmap_full(bmv)) {
xfs_trim_extent(&got, first_bno, len);
/*
* Report an entry for a hole if this extent doesn't directly
* follow the previous one.
*/
if (got.br_startoff > bno) {
xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
got.br_startoff);
if (xfs_getbmap_full(bmv))
break;
}
/*
* In order to report shared extents accurately, we report each
* distinct shared / unshared part of a single bmbt record with
* an individual getbmapx record.
*/
bno = got.br_startoff + got.br_blockcount;
rec = got;
do {
error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
&rec);
if (error || xfs_getbmap_full(bmv))
goto out_unlock_ilock;
} while (xfs_getbmap_next_rec(&rec, bno));
if (!xfs_iext_next_extent(ifp, &icur, &got)) {
xfs_fileoff_t end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
if (whichfork != XFS_ATTR_FORK && bno < end &&
!xfs_getbmap_full(bmv)) {
xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
bno, end);
}
break;
}
if (bno >= first_bno + len)
break;
}
out_unlock_ilock:
xfs_iunlock(ip, lock);
out_unlock_iolock:
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return error;
}
/*
* Dead simple method of punching delayed allocation blocks from a range in
* the inode. This will always punch out both the start and end blocks, even
* if the ranges only partially overlap them, so it is up to the caller to
* ensure that partial blocks are not passed in.
*/
int
xfs_bmap_punch_delalloc_range(
struct xfs_inode *ip,
xfs_fileoff_t start_fsb,
xfs_fileoff_t length)
{
struct xfs_ifork *ifp = &ip->i_df;
xfs_fileoff_t end_fsb = start_fsb + length;
struct xfs_bmbt_irec got, del;
struct xfs_iext_cursor icur;
int error = 0;
ASSERT(!xfs_need_iread_extents(ifp));
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
goto out_unlock;
while (got.br_startoff + got.br_blockcount > start_fsb) {
del = got;
xfs_trim_extent(&del, start_fsb, length);
/*
* A delete can push the cursor forward. Step back to the
* previous extent on non-delalloc or extents outside the
* target range.
*/
if (!del.br_blockcount ||
!isnullstartblock(del.br_startblock)) {
if (!xfs_iext_prev_extent(ifp, &icur, &got))
break;
continue;
}
error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
&got, &del);
if (error || !xfs_iext_get_extent(ifp, &icur, &got))
break;
}
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
/*
* Test whether it is appropriate to check an inode for and free post EOF
* blocks. The 'force' parameter determines whether we should also consider
* regular files that are marked preallocated or append-only.
*/
bool
xfs_can_free_eofblocks(
struct xfs_inode *ip,
bool force)
{
struct xfs_bmbt_irec imap;
struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t end_fsb;
xfs_fileoff_t last_fsb;
int nimaps = 1;
int error;
/*
* Caller must either hold the exclusive io lock; or be inactivating
* the inode, which guarantees there are no other users of the inode.
*/
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL) ||
(VFS_I(ip)->i_state & I_FREEING));
/* prealloc/delalloc exists only on regular files */
if (!S_ISREG(VFS_I(ip)->i_mode))
return false;
/*
* Zero sized files with no cached pages and no delalloc blocks will not
* have speculative prealloc/delalloc blocks to remove.
*/
if (VFS_I(ip)->i_size == 0 &&
VFS_I(ip)->i_mapping->nrpages == 0 &&
ip->i_delayed_blks == 0)
return false;
/* If we haven't read in the extent list, then don't do it now. */
if (xfs_need_iread_extents(&ip->i_df))
return false;
/*
* Do not free real preallocated or append-only files unless the file
* has delalloc blocks and we are forced to remove them.
*/
if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
if (!force || ip->i_delayed_blks == 0)
return false;
/*
* Do not try to free post-EOF blocks if EOF is beyond the end of the
* range supported by the page cache, because the truncation will loop
* forever.
*/
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1)
end_fsb = roundup_64(end_fsb, mp->m_sb.sb_rextsize);
last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
if (last_fsb <= end_fsb)
return false;
/*
* Look up the mapping for the first block past EOF. If we can't find
* it, there's nothing to free.
*/
xfs_ilock(ip, XFS_ILOCK_SHARED);
error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
0);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (error || nimaps == 0)
return false;
/*
* If there's a real mapping there or there are delayed allocation
* reservations, then we have post-EOF blocks to try to free.
*/
return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
}
/*
* This is called to free any blocks beyond eof. The caller must hold
* IOLOCK_EXCL unless we are in the inode reclaim path and have the only
* reference to the inode.
*/
int
xfs_free_eofblocks(
struct xfs_inode *ip)
{
struct xfs_trans *tp;
struct xfs_mount *mp = ip->i_mount;
int error;
/* Attach the dquots to the inode up front. */
error = xfs_qm_dqattach(ip);
if (error)
return error;
/* Wait on dio to ensure i_size has settled. */
inode_dio_wait(VFS_I(ip));
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
if (error) {
ASSERT(xfs_is_shutdown(mp));
return error;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
/*
* Do not update the on-disk file size. If we update the on-disk file
* size and then the system crashes before the contents of the file are
* flushed to disk then the files may be full of holes (ie NULL files
* bug).
*/
error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
if (error)
goto err_cancel;
error = xfs_trans_commit(tp);
if (error)
goto out_unlock;
xfs_inode_clear_eofblocks_tag(ip);
goto out_unlock;
err_cancel:
/*
* If we get an error at this point we simply don't
* bother truncating the file.
*/
xfs_trans_cancel(tp);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
int
xfs_alloc_file_space(
struct xfs_inode *ip,
xfs_off_t offset,
xfs_off_t len)
{
xfs_mount_t *mp = ip->i_mount;
xfs_off_t count;
xfs_filblks_t allocated_fsb;
xfs_filblks_t allocatesize_fsb;
xfs_extlen_t extsz, temp;
xfs_fileoff_t startoffset_fsb;
xfs_fileoff_t endoffset_fsb;
int nimaps;
int rt;
xfs_trans_t *tp;
xfs_bmbt_irec_t imaps[1], *imapp;
int error;
trace_xfs_alloc_file_space(ip);
if (xfs_is_shutdown(mp))
return -EIO;
error = xfs_qm_dqattach(ip);
if (error)
return error;
if (len <= 0)
return -EINVAL;
rt = XFS_IS_REALTIME_INODE(ip);
extsz = xfs_get_extsz_hint(ip);
count = len;
imapp = &imaps[0];
nimaps = 1;
startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
allocatesize_fsb = endoffset_fsb - startoffset_fsb;
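/*
 * XFS_B_TO_FSBT rounds the start down and XFS_B_TO_FSB rounds the end
 * up, so the block range fully covers the requested byte range.
 */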
/*
* Allocate file space until done or until there is an error
*/
while (allocatesize_fsb && !error) {
xfs_fileoff_t s, e;
unsigned int dblocks, rblocks, resblks;
/*
* Determine space reservations for data/realtime.
*/
if (unlikely(extsz)) {
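/*
 * Round the allocation range out to extent size hint boundaries so
 * the reservation below covers a worst-case hint-aligned allocation.
 */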
s = startoffset_fsb;
do_div(s, extsz);
s *= extsz;
e = startoffset_fsb + allocatesize_fsb;
div_u64_rem(startoffset_fsb, extsz, &temp);
if (temp)
e += temp;
div_u64_rem(e, extsz, &temp);
if (temp)
e += extsz - temp;
} else {
s = 0;
e = allocatesize_fsb;
}
/*
* The transaction reservation is limited to a 32-bit block
* count, hence we need to limit the number of blocks we are
* trying to reserve to avoid an overflow. We can't allocate
* more than @nimaps extents, and an extent is limited on disk
* to XFS_MAX_BMBT_EXTLEN (21 bits), so use that to enforce the
* limit.
*/
resblks = min_t(xfs_fileoff_t, (e - s),
(XFS_MAX_BMBT_EXTLEN * nimaps));
if (unlikely(rt)) {
dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
rblocks = resblks;
} else {
dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
rblocks = 0;
}
error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
dblocks, rblocks, false, &tp);
if (error)
break;
error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
XFS_IEXT_ADD_NOSPLIT_CNT);
if (error == -EFBIG)
error = xfs_iext_count_upgrade(tp, ip,
XFS_IEXT_ADD_NOSPLIT_CNT);
if (error)
goto error;
error = xfs_bmapi_write(tp, ip, startoffset_fsb,
allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
&nimaps);
if (error)
goto error;
ip->i_diflags |= XFS_DIFLAG_PREALLOC;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
break;
allocated_fsb = imapp->br_blockcount;
if (nimaps == 0) {
error = -ENOSPC;
break;
}
startoffset_fsb += allocated_fsb;
allocatesize_fsb -= allocated_fsb;
}
return error;
error:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
static int
xfs_unmap_extent(
struct xfs_inode *ip,
xfs_fileoff_t startoffset_fsb,
xfs_filblks_t len_fsb,
int *done)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
int error;
error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
false, &tp);
if (error)
return error;
error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
XFS_IEXT_PUNCH_HOLE_CNT);
if (error == -EFBIG)
error = xfs_iext_count_upgrade(tp, ip, XFS_IEXT_PUNCH_HOLE_CNT);
if (error)
goto out_trans_cancel;
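/*
 * The '2' passed to xfs_bunmapi() caps how many extents are unmapped
 * in this transaction; xfs_free_file_space() keeps calling back in
 * with a fresh transaction until *done is set.
 */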
error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
if (error)
goto out_trans_cancel;
error = xfs_trans_commit(tp);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
out_trans_cancel:
xfs_trans_cancel(tp);
goto out_unlock;
}
/* Caller must first wait for the completion of any pending DIOs if required. */
int
xfs_flush_unmap_range(
struct xfs_inode *ip,
xfs_off_t offset,
xfs_off_t len)
{
struct xfs_mount *mp = ip->i_mount;
struct inode *inode = VFS_I(ip);
xfs_off_t rounding, start, end;
int error;
rounding = max_t(xfs_off_t, mp->m_sb.sb_blocksize, PAGE_SIZE);
start = round_down(offset, rounding);
end = round_up(offset + len, rounding) - 1;
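/*
 * Round the byte range out to whole pages (or blocks, whichever is
 * larger) so the writeback and invalidation below cover every page
 * that overlaps the range.
 */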
error = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (error)
return error;
truncate_pagecache_range(inode, start, end);
return 0;
}
int
xfs_free_file_space(
struct xfs_inode *ip,
xfs_off_t offset,
xfs_off_t len)
{
struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t startoffset_fsb;
xfs_fileoff_t endoffset_fsb;
int done = 0, error;
trace_xfs_free_file_space(ip);
error = xfs_qm_dqattach(ip);
if (error)
return error;
if (len <= 0) /* if nothing being freed */
return 0;
startoffset_fsb = XFS_B_TO_FSB(mp, offset);
endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
/* We can only free complete realtime extents. */
if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
startoffset_fsb = roundup_64(startoffset_fsb,
mp->m_sb.sb_rextsize);
endoffset_fsb = rounddown_64(endoffset_fsb,
mp->m_sb.sb_rextsize);
}
/*
* Need to zero the stuff we're not freeing, on disk.
*/
if (endoffset_fsb > startoffset_fsb) {
while (!done) {
error = xfs_unmap_extent(ip, startoffset_fsb,
endoffset_fsb - startoffset_fsb, &done);
if (error)
return error;
}
}
/*
* Now that we've unmapped all full blocks we'll have to zero out any
* partial block at the beginning and/or end. xfs_zero_range is smart
* enough to skip any holes, including those we just created, but we
* must take care not to zero beyond EOF and enlarge i_size.
*/
if (offset >= XFS_ISIZE(ip))
return 0;
if (offset + len > XFS_ISIZE(ip))
len = XFS_ISIZE(ip) - offset;
error = xfs_zero_range(ip, offset, len, NULL);
if (error)
return error;
/*
* If we zeroed right up to EOF and EOF straddles a page boundary we
* must make sure that the post-EOF area is also zeroed because the
* page could be mmap'd and xfs_zero_range doesn't do that for us.
* Writeback of the eof page will do this, albeit clumsily.
*/
if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
round_down(offset + len, PAGE_SIZE), LLONG_MAX);
}
return error;
}
static int
xfs_prepare_shift(
struct xfs_inode *ip,
loff_t offset)
{
struct xfs_mount *mp = ip->i_mount;
int error;
/*
* Trim eofblocks to avoid shifting uninitialized post-eof preallocation
* into the accessible region of the file.
*/
if (xfs_can_free_eofblocks(ip, true)) {
error = xfs_free_eofblocks(ip);
if (error)
return error;
}
/*
* Shift operations must stabilize the start block offset boundary along
* with the full range of the operation. If we don't, a COW writeback
* completion could race with an insert, front merge with the start
* extent (after split) during the shift and corrupt the file. Start
* with the block just prior to the start to stabilize the boundary.
*/
offset = round_down(offset, mp->m_sb.sb_blocksize);
if (offset)
offset -= mp->m_sb.sb_blocksize;
/*
* Writeback and invalidate cache for the remainder of the file as we're
* about to shift down every extent from offset to EOF.
*/
error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
if (error)
return error;
/*
* Clean out anything hanging around in the cow fork now that
* we've flushed all the dirty data out to disk to avoid having
* CoW extents at the wrong offsets.
*/
if (xfs_inode_has_cow_data(ip)) {
error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
true);
if (error)
return error;
}
return 0;
}
/*
* xfs_collapse_file_space()
* This routine frees disk space and shifts extents for the given file.
* The first thing we do is free the data blocks in the specified range
* by calling xfs_free_file_space(), which also syncs dirty data and
* invalidates the page cache over the region the collapse operates on.
* We then shift the extent records to the left to cover the hole.
* RETURNS:
* 0 on success
* errno on error
*
*/
int
xfs_collapse_file_space(
struct xfs_inode *ip,
xfs_off_t offset,
xfs_off_t len)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error;
xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
bool done = false;
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
trace_xfs_collapse_file_space(ip);
error = xfs_free_file_space(ip, offset, len);
if (error)
return error;
error = xfs_prepare_shift(ip, offset);
if (error)
return error;
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
if (error)
return error;
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
while (!done) {
error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
&done);
if (error)
goto out_trans_cancel;
if (done)
break;
/* finish any deferred frees and roll the transaction */
error = xfs_defer_finish(&tp);
if (error)
goto out_trans_cancel;
}
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
out_trans_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
/*
* xfs_insert_file_space()
* This routine creates a hole by shifting extents for the given file.
* The first thing we do is sync dirty data and invalidate the page cache
* over the region the insert range operates on. We then split the extent
* in two at the given offset by calling xfs_bmap_split_extent, and shift
* all extent records lying between [offset, last allocated extent] to
* the right to make room for the hole.
* RETURNS:
* 0 on success
* errno on error
*/
int
xfs_insert_file_space(
struct xfs_inode *ip,
loff_t offset,
loff_t len)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error;
xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, offset);
xfs_fileoff_t next_fsb = NULLFSBLOCK;
xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
bool done = false;
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
trace_xfs_insert_file_space(ip);
error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
if (error)
return error;
error = xfs_prepare_shift(ip, offset);
if (error)
return error;
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
if (error)
return error;
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
XFS_IEXT_PUNCH_HOLE_CNT);
if (error == -EFBIG)
error = xfs_iext_count_upgrade(tp, ip, XFS_IEXT_PUNCH_HOLE_CNT);
if (error)
goto out_trans_cancel;
/*
* The extent shifting code works on extent granularity. So, if stop_fsb
* is not the starting block of an extent, we need to split the extent at
* stop_fsb.
*/
error = xfs_bmap_split_extent(tp, ip, stop_fsb);
if (error)
goto out_trans_cancel;
do {
error = xfs_defer_finish(&tp);
if (error)
goto out_trans_cancel;
error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
&done, stop_fsb);
if (error)
goto out_trans_cancel;
} while (!done);
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
out_trans_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
/*
* We need to check that the format of the data fork in the temporary inode is
* valid for the target inode before doing the swap. This is not a problem with
* attr1 because of the fixed fork offset, but attr2 has a dynamically sized
* data fork depending on the space the attribute fork is taking so we can get
* invalid formats on the target inode.
*
* E.g. target has space for 7 extents in extent format, temp inode only has
* space for 6. If we defragment down to 7 extents, then the tmp format is a
* btree, but when swapped it needs to be in extent format. Hence we can't just
* blindly swap data forks on attr2 filesystems.
*
* Note that we check the swap in both directions so that we don't end up with
* a corrupt temporary inode, either.
*
* Note that fixing the way xfs_fsr sets up the attribute fork in the source
* inode will prevent this situation from occurring, so all we do here is
* reject and log the attempt. Basically we are putting the responsibility on
* userspace to get this right.
*/
static int
xfs_swap_extents_check_format(
struct xfs_inode *ip, /* target inode */
struct xfs_inode *tip) /* tmp inode */
{
struct xfs_ifork *ifp = &ip->i_df;
struct xfs_ifork *tifp = &tip->i_df;
/* User/group/project quota ids must match if quotas are enforced. */
if (XFS_IS_QUOTA_ON(ip->i_mount) &&
(!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
!gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
ip->i_projid != tip->i_projid))
return -EINVAL;
/* Should never get a local format */
if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
tifp->if_format == XFS_DINODE_FMT_LOCAL)
return -EINVAL;
/*
* if the target inode has fewer extents than the temporary inode, then
* why did userspace call us?
*/
if (ifp->if_nextents < tifp->if_nextents)
return -EINVAL;
/*
* If we have to use the (expensive) rmap swap method, we can
* handle any number of extents and any format.
*/
if (xfs_has_rmapbt(ip->i_mount))
return 0;
/*
* If the target inode is in extent format and the temp inode is in btree
* format then we will end up with the target inode in the wrong format,
* as we already know there are fewer extents in the temp inode.
*/
if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
tifp->if_format == XFS_DINODE_FMT_BTREE)
return -EINVAL;
/* If the temp inode is in extent format, its extent count must fit in the target */
if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
return -EINVAL;
/* And if the target inode is in extent format, its extent count must fit in the temp inode */
if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
return -EINVAL;
/*
* If we are in a btree format, check that the temp root block will fit
* in the target and that it has enough extents to be in btree format
* in the target.
*
* Note that we have to be careful to allow btree->extent conversions
* (a common defrag case) which will occur when the temp inode is in
* extent format...
*/
if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
if (XFS_IFORK_Q(ip) &&
XFS_BMAP_BMDR_SPACE(tifp->if_broot) > XFS_IFORK_BOFF(ip))
return -EINVAL;
if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
return -EINVAL;
}
/* Reciprocal target->temp btree format checks */
if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
if (XFS_IFORK_Q(tip) &&
XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
return -EINVAL;
if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
return -EINVAL;
}
return 0;
}
static int
xfs_swap_extent_flush(
struct xfs_inode *ip)
{
int error;
error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
if (error)
return error;
truncate_pagecache_range(VFS_I(ip), 0, -1);
/* Verify the page cache is empty, as expected for the O_DIRECT ftmp file */
if (VFS_I(ip)->i_mapping->nrpages)
return -EINVAL;
return 0;
}
/*
* Move extents from one file to another, when rmap is enabled.
*/
STATIC int
xfs_swap_extent_rmap(
struct xfs_trans **tpp,
struct xfs_inode *ip,
struct xfs_inode *tip)
{
struct xfs_trans *tp = *tpp;
struct xfs_bmbt_irec irec;
struct xfs_bmbt_irec uirec;
struct xfs_bmbt_irec tirec;
xfs_fileoff_t offset_fsb;
xfs_fileoff_t end_fsb;
xfs_filblks_t count_fsb;
int error;
xfs_filblks_t ilen;
xfs_filblks_t rlen;
int nimaps;
uint64_t tip_flags2;
/*
* If the source file has shared blocks, we must flag the donor
* file as having shared blocks so that we get the shared-block
* rmap functions when we go to fix up the rmaps. The flags
* will be switched for real later.
*/
tip_flags2 = tip->i_diflags2;
if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;
offset_fsb = 0;
end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
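/*
* Walk the donor file's mappings across the range covered by the source
* file's size, swapping one piece at a time.
*/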
while (count_fsb) {
/* Read extent from the donor file */
nimaps = 1;
error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
&nimaps, 0);
if (error)
goto out;
ASSERT(nimaps == 1);
ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
trace_xfs_swap_extent_rmap_remap(tip, &tirec);
ilen = tirec.br_blockcount;
/* Unmap the old blocks in the source file. */
while (tirec.br_blockcount) {
ASSERT(tp->t_firstblock == NULLFSBLOCK);
trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
/* Read extent from the source file */
nimaps = 1;
error = xfs_bmapi_read(ip, tirec.br_startoff,
tirec.br_blockcount, &irec,
&nimaps, 0);
if (error)
goto out;
ASSERT(nimaps == 1);
ASSERT(tirec.br_startoff == irec.br_startoff);
trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
/* Trim the extent to the shorter of the two mappings. */
uirec = tirec;
uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
tirec.br_blockcount,
irec.br_blockcount);
trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
if (xfs_bmap_is_real_extent(&uirec)) {
error = xfs_iext_count_may_overflow(ip,
XFS_DATA_FORK,
XFS_IEXT_SWAP_RMAP_CNT);
if (error == -EFBIG)
error = xfs_iext_count_upgrade(tp, ip,
XFS_IEXT_SWAP_RMAP_CNT);
if (error)
goto out;
}
if (xfs_bmap_is_real_extent(&irec)) {
error = xfs_iext_count_may_overflow(tip,
XFS_DATA_FORK,
XFS_IEXT_SWAP_RMAP_CNT);
if (error == -EFBIG)
error = xfs_iext_count_upgrade(tp, ip,
XFS_IEXT_SWAP_RMAP_CNT);
if (error)
goto out;
}
/* Remove the mapping from the donor file. */
xfs_bmap_unmap_extent(tp, tip, &uirec);
/* Remove the mapping from the source file. */
xfs_bmap_unmap_extent(tp, ip, &irec);
/* Map the donor file's blocks into the source file. */
xfs_bmap_map_extent(tp, ip, &uirec);
/* Map the source file's blocks into the donor file. */
xfs_bmap_map_extent(tp, tip, &irec);
error = xfs_defer_finish(tpp);
tp = *tpp;
if (error)
goto out;
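/* Advance past the piece we just remapped and handle the rest of it. */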
tirec.br_startoff += rlen;
if (tirec.br_startblock != HOLESTARTBLOCK &&
tirec.br_startblock != DELAYSTARTBLOCK)
tirec.br_startblock += rlen;
tirec.br_blockcount -= rlen;
}
/* Roll on... */
count_fsb -= ilen;
offset_fsb += ilen;
}
tip->i_diflags2 = tip_flags2;
return 0;
out:
trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
tip->i_diflags2 = tip_flags2;
return error;
}
/* Swap the extents of two files by swapping data forks. */
STATIC int
xfs_swap_extent_forks(
struct xfs_trans *tp,
struct xfs_inode *ip,
struct xfs_inode *tip,
int *src_log_flags,
int *target_log_flags)
{
xfs_filblks_t aforkblks = 0;
xfs_filblks_t taforkblks = 0;
xfs_extnum_t junk;
uint64_t tmp;
int error;
/*
* Count the number of extended attribute blocks
*/
if (XFS_IFORK_Q(ip) && ip->i_afp->if_nextents > 0 &&
ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
&aforkblks);
if (error)
return error;
}
if (XFS_IFORK_Q(tip) && tip->i_afp->if_nextents > 0 &&
tip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
&taforkblks);
if (error)
return error;
}
/*
* Btree format (v3) inodes have the inode number stamped in the bmbt
* block headers. We can't start changing the bmbt blocks until the
* inode owner change is logged so recovery does the right thing in the
* event of a crash. Set the owner change log flags now and leave the
* bmbt scan as the last step.
*/
if (xfs_has_v3inodes(ip->i_mount)) {
if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
(*target_log_flags) |= XFS_ILOG_DOWNER;
if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
(*src_log_flags) |= XFS_ILOG_DOWNER;
}
/*
* Swap the data forks of the inodes
*/
swap(ip->i_df, tip->i_df);
/*
* Fix the on-disk inode values
*/
tmp = (uint64_t)ip->i_nblocks;
ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
tip->i_nblocks = tmp + taforkblks - aforkblks;
/*
* The extents in the source inode could still contain speculative
* preallocation beyond EOF (e.g. the file is open but not modified
* while defrag is in progress). In that case, we need to copy over the
* number of delalloc blocks the data fork in the source inode is
* tracking beyond EOF so that when the fork is truncated away when the
* temporary inode is unlinked we don't underrun the i_delayed_blks
* counter on that inode.
*/
ASSERT(tip->i_delayed_blks == 0);
tip->i_delayed_blks = ip->i_delayed_blks;
ip->i_delayed_blks = 0;
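/* Set the data fork log flags that match each inode's new fork format. */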
switch (ip->i_df.if_format) {
case XFS_DINODE_FMT_EXTENTS:
(*src_log_flags) |= XFS_ILOG_DEXT;
break;
case XFS_DINODE_FMT_BTREE:
ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
(*src_log_flags & XFS_ILOG_DOWNER));
(*src_log_flags) |= XFS_ILOG_DBROOT;
break;
}
switch (tip->i_df.if_format) {
case XFS_DINODE_FMT_EXTENTS:
(*target_log_flags) |= XFS_ILOG_DEXT;
break;
case XFS_DINODE_FMT_BTREE:
(*target_log_flags) |= XFS_ILOG_DBROOT;
ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
(*target_log_flags & XFS_ILOG_DOWNER));
break;
}
return 0;
}
/*
* Fix up the owners of the bmbt blocks to refer to the current inode. The
* change owner scan attempts to order all modified buffers in the current
* transaction. In the event of ordered buffer failure, the offending buffer is
* physically logged as a fallback and the scan returns -EAGAIN. We must roll
* the transaction in this case to replenish the fallback log reservation and
* restart the scan. This process repeats until the scan completes.
*/
static int
xfs_swap_change_owner(
struct xfs_trans **tpp,
struct xfs_inode *ip,
struct xfs_inode *tmpip)
{
int error;
struct xfs_trans *tp = *tpp;
do {
error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
NULL);
/* success or fatal error */
if (error != -EAGAIN)
break;
error = xfs_trans_roll(tpp);
if (error)
break;
tp = *tpp;
/*
* Redirty both inodes so they can relog and keep the log tail
* moving forward.
*/
xfs_trans_ijoin(tp, ip, 0);
xfs_trans_ijoin(tp, tmpip, 0);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
} while (true);
return error;
}
int
xfs_swap_extents(
struct xfs_inode *ip, /* target inode */
struct xfs_inode *tip, /* tmp inode */
struct xfs_swapext *sxp)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
struct xfs_bstat *sbp = &sxp->sx_stat;
int src_log_flags, target_log_flags;
int error = 0;
uint64_t f;
int resblks = 0;
unsigned int flags = 0;
/*
* Lock the inodes against other IO, page faults and truncate to
* begin with. Then we can safely flush the inodes and ensure they have no
* page cache. Once we have done this we can take the ilocks and
* do the rest of the checks.
*/
lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
VFS_I(tip)->i_mapping);
/* Verify that both files have the same format */
if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
error = -EINVAL;
goto out_unlock;
}
/* Verify both files are either real-time or non-realtime */
if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
error = -EINVAL;
goto out_unlock;
}
error = xfs_qm_dqattach(ip);
if (error)
goto out_unlock;
error = xfs_qm_dqattach(tip);
if (error)
goto out_unlock;
error = xfs_swap_extent_flush(ip);
if (error)
goto out_unlock;
error = xfs_swap_extent_flush(tip);
if (error)
goto out_unlock;
if (xfs_inode_has_cow_data(tip)) {
error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
if (error)
goto out_unlock;
}
/*
* Extent "swapping" with rmap requires a permanent reservation and
* a block reservation because it's really just a remap operation
* performed with log redo items!
*/
if (xfs_has_rmapbt(mp)) {
int w = XFS_DATA_FORK;
uint32_t ipnext = ip->i_df.if_nextents;
uint32_t tipnext = tip->i_df.if_nextents;
/*
* Conceptually this shouldn't affect the shape of either bmbt,
* but since we atomically move extents one by one, we reserve
* enough space to rebuild both trees.
*/
resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
/*
* If either inode straddles a bmapbt block allocation boundary,
* the rmapbt algorithm triggers repeated allocs and frees as
* extents are remapped. This can exhaust the block reservation
* prematurely and cause shutdown. Return freed blocks to the
* transaction reservation to counter this behavior.
*/
flags |= XFS_TRANS_RES_FDBLKS;
}
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
&tp);
if (error)
goto out_unlock;
/*
* Lock and join the inodes to the transaction so that transaction commit
* or cancel will unlock the inodes from this point onwards.
*/
xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
xfs_trans_ijoin(tp, tip, 0);
/* Verify all data are being swapped */
if (sxp->sx_offset != 0 ||
sxp->sx_length != ip->i_disk_size ||
sxp->sx_length != tip->i_disk_size) {
error = -EFAULT;
goto out_trans_cancel;
}
trace_xfs_swap_extent_before(ip, 0);
trace_xfs_swap_extent_before(tip, 1);
/* check inode formats now that data is flushed */
error = xfs_swap_extents_check_format(ip, tip);
if (error) {
xfs_notice(mp,
"%s: inode 0x%llx format is incompatible for exchanging.",
__func__, ip->i_ino);
goto out_trans_cancel;
}
/*
* Compare the current change & modify times with those
* passed in. If they differ, we abort this swap.
* This is the mechanism used to assure the calling
* process that the file was not changed out from
* under it.
*/
if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
(sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
(sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
(sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
error = -EBUSY;
goto out_trans_cancel;
}
/*
* Note the trickiness in setting the log flags - we set the owner log
* flag on the opposite inode (i.e. the inode we are setting the new
* owner to be) because once we swap the forks and log that, log
* recovery is going to see the fork as owned by the swapped inode,
* not the pre-swapped inodes.
*/
src_log_flags = XFS_ILOG_CORE;
target_log_flags = XFS_ILOG_CORE;
if (xfs_has_rmapbt(mp))
error = xfs_swap_extent_rmap(&tp, ip, tip);
else
error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
&target_log_flags);
if (error)
goto out_trans_cancel;
/* Do we have to swap reflink flags? */
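/*
* The XOR fires only when exactly one of the two inodes has the reflink
* flag set; since the data forks have been swapped, move the flag so it
* follows the fork that contains the shared blocks.
*/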
if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
(tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
}
/* Swap the cow forks. */
if (xfs_has_reflink(mp)) {
ASSERT(!ip->i_cowfp ||
ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
ASSERT(!tip->i_cowfp ||
tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
swap(ip->i_cowfp, tip->i_cowfp);
if (ip->i_cowfp && ip->i_cowfp->if_bytes)
xfs_inode_set_cowblocks_tag(ip);
else
xfs_inode_clear_cowblocks_tag(ip);
if (tip->i_cowfp && tip->i_cowfp->if_bytes)
xfs_inode_set_cowblocks_tag(tip);
else
xfs_inode_clear_cowblocks_tag(tip);
}
xfs_trans_log_inode(tp, ip, src_log_flags);
xfs_trans_log_inode(tp, tip, target_log_flags);
/*
* The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
* have inode number owner values in the bmbt blocks that still refer to
* the old inode. Scan each bmbt to fix up the owner values with the
* inode number of the current inode.
*/
if (src_log_flags & XFS_ILOG_DOWNER) {
error = xfs_swap_change_owner(&tp, ip, tip);
if (error)
goto out_trans_cancel;
}
if (target_log_flags & XFS_ILOG_DOWNER) {
error = xfs_swap_change_owner(&tp, tip, ip);
if (error)
goto out_trans_cancel;
}
/*
* If this is a synchronous mount, make sure that the
* transaction goes to disk before returning to the user.
*/
if (xfs_has_wsync(mp))
xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp);
trace_xfs_swap_extent_after(ip, 0);
trace_xfs_swap_extent_after(tip, 1);
out_unlock_ilock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_iunlock(tip, XFS_ILOCK_EXCL);
out_unlock:
filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
VFS_I(tip)->i_mapping);
unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
return error;
out_trans_cancel:
xfs_trans_cancel(tp);
goto out_unlock_ilock;
}