bcachefs: for_each_btree_key2()

This introduces two new macros for iterating through the btree, with
transaction restart handling
 - for_each_btree_key2()
 - for_each_btree_key_commit()

Every iteration now runs inside an implicit transaction, and — as with
lockrestart_do() and commit_do() — returning -EINTR causes the
transaction to be restarted at the same key.

This patch converts a bunch of code that was open-coding this pattern to
the new macros, saving a substantial amount of code.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
This commit is contained in:
Kent Overstreet 2022-07-15 20:51:09 -04:00 committed by Kent Overstreet
parent 0d06b4eca6
commit a1783320d4
6 changed files with 192 additions and 305 deletions

View file

@ -928,16 +928,10 @@ int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
ret = commit_do(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW,
bch2_check_alloc_to_lru_ref(&trans, &iter));
if (ret)
break;
}
bch2_trans_iter_exit(&trans, &iter);
for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
POS_MIN, BTREE_ITER_PREFETCH, k,
NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
bch2_check_alloc_to_lru_ref(&trans, &iter));
bch2_trans_exit(&trans);
return ret < 0 ? ret : 0;

View file

@ -1845,10 +1845,15 @@ int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
return ret;
}
static bool gc_btree_gens_key(struct bch_fs *c, struct bkey_s_c k)
static int gc_btree_gens_key(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const struct bch_extent_ptr *ptr;
struct bkey_i *u;
int ret;
percpu_down_read(&c->mark_lock);
bkey_for_each_ptr(ptrs, ptr) {
@ -1856,7 +1861,7 @@ static bool gc_btree_gens_key(struct bch_fs *c, struct bkey_s_c k)
if (ptr_stale(ca, ptr) > 16) {
percpu_up_read(&c->mark_lock);
return true;
goto update;
}
}
@ -1868,77 +1873,27 @@ static bool gc_btree_gens_key(struct bch_fs *c, struct bkey_s_c k)
*gen = ptr->gen;
}
percpu_up_read(&c->mark_lock);
return 0;
update:
u = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
ret = PTR_ERR_OR_ZERO(u);
if (ret)
return ret;
return false;
bkey_reassemble(u, k);
bch2_extent_normalize(c, bkey_i_to_s(u));
return bch2_trans_update(trans, iter, u, 0);
}
/*
* For recalculating oldest gen, we only need to walk keys in leaf nodes; btree
* node pointers currently never have cached pointers that can become stale:
*/
static int bch2_gc_btree_gens(struct btree_trans *trans, enum btree_id btree_id)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_buf sk;
int ret = 0, commit_err = 0;
bch2_bkey_buf_init(&sk);
bch2_trans_iter_init(trans, &iter, btree_id, POS_MIN,
BTREE_ITER_PREFETCH|
BTREE_ITER_NOT_EXTENTS|
BTREE_ITER_ALL_SNAPSHOTS);
while ((bch2_trans_begin(trans),
k = bch2_btree_iter_peek(&iter)).k) {
ret = bkey_err(k);
if (ret == -EINTR)
continue;
if (ret)
break;
c->gc_gens_pos = iter.pos;
if (gc_btree_gens_key(c, k) && !commit_err) {
bch2_bkey_buf_reassemble(&sk, c, k);
bch2_extent_normalize(c, bkey_i_to_s(sk.k));
commit_err =
bch2_trans_update(trans, &iter, sk.k, 0) ?:
bch2_trans_commit(trans, NULL, NULL,
BTREE_INSERT_NOWAIT|
BTREE_INSERT_NOFAIL);
if (commit_err == -EINTR) {
commit_err = 0;
continue;
}
}
bch2_btree_iter_advance(&iter);
}
bch2_trans_iter_exit(trans, &iter);
bch2_bkey_buf_exit(&sk, c);
return ret;
}
static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_iter *iter)
static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k)
{
struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
struct bkey_s_c k;
struct bch_alloc_v4 a;
struct bkey_i_alloc_v4 *a_mut;
int ret;
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
return ret;
bch2_alloc_to_v4(k, &a);
if (a.oldest_gen == ca->oldest_gen[iter->pos.offset])
@ -1998,26 +1953,35 @@ int bch2_gc_gens(struct bch_fs *c)
for (i = 0; i < BTREE_ID_NR; i++)
if ((1 << i) & BTREE_ID_HAS_PTRS) {
struct btree_iter iter;
struct bkey_s_c k;
c->gc_gens_btree = i;
c->gc_gens_pos = POS_MIN;
ret = bch2_gc_btree_gens(&trans, i);
ret = for_each_btree_key_commit(&trans, iter, i,
POS_MIN,
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
k,
NULL, NULL,
BTREE_INSERT_NOFAIL,
gc_btree_gens_key(&trans, &iter, k));
if (ret) {
bch_err(c, "error recalculating oldest_gen: %i", ret);
goto err;
}
}
for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
ret = commit_do(&trans, NULL, NULL,
BTREE_INSERT_NOFAIL,
bch2_alloc_write_oldest_gen(&trans, &iter));
if (ret) {
bch_err(c, "error writing oldest_gen: %i", ret);
break;
}
ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
POS_MIN,
BTREE_ITER_PREFETCH,
k,
NULL, NULL,
BTREE_INSERT_NOFAIL,
bch2_alloc_write_oldest_gen(&trans, &iter, k));
if (ret) {
bch_err(c, "error writing oldest_gen: %i", ret);
goto err;
}
bch2_trans_iter_exit(&trans, &iter);
c->gc_gens_btree = 0;
c->gc_gens_pos = POS_MIN;

View file

@ -393,6 +393,39 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
return k;
}
#define for_each_btree_key2(_trans, _iter, _btree_id, \
_start, _flags, _k, _do) \
({ \
int _ret = 0; \
\
bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
(_start), (_flags)); \
\
do { \
bch2_trans_begin(_trans); \
(_k) = bch2_btree_iter_peek_type(&(_iter), (_flags)); \
if (!(_k).k) { \
_ret = 0; \
break; \
} \
\
_ret = bkey_err(_k) ?: (_do); \
if (!_ret) \
bch2_btree_iter_advance(&(_iter)); \
} while (_ret == 0 || _ret == -EINTR); \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
_ret; \
})
#define for_each_btree_key_commit(_trans, _iter, _btree_id, \
_start, _iter_flags, _k, \
_disk_res, _journal_seq, _commit_flags,\
_do) \
for_each_btree_key2(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
(_do) ?: bch2_trans_commit(_trans, (_disk_res),\
(_journal_seq), (_commit_flags)))
#define for_each_btree_key(_trans, _iter, _btree_id, \
_start, _flags, _k, _ret) \
for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \

View file

@ -852,24 +852,16 @@ static int hash_check_key(struct btree_trans *trans,
static int check_inode(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k,
struct bch_inode_unpacked *prev,
struct snapshots_seen *s,
bool full)
{
struct bch_fs *c = trans->c;
struct bkey_s_c k;
struct bch_inode_unpacked u;
bool do_update = false;
int ret;
k = bch2_btree_iter_peek(iter);
if (!k.k)
return 0;
ret = bkey_err(k);
if (ret)
return ret;
ret = check_key_has_snapshot(trans, iter, k);
if (ret < 0)
goto err;
@ -984,7 +976,7 @@ static int check_inode(struct btree_trans *trans,
}
if (do_update) {
ret = write_inode(trans, &u, iter->pos.snapshot);
ret = __write_inode(trans, &u, iter->pos.snapshot);
if (ret)
bch_err(c, "error in fsck: error %i "
"updating inode", ret);
@ -1003,25 +995,19 @@ static int check_inodes(struct bch_fs *c, bool full)
struct btree_iter iter;
struct bch_inode_unpacked prev = { 0 };
struct snapshots_seen s;
struct bkey_s_c k;
int ret;
snapshots_seen_init(&s);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_inodes, POS_MIN,
BTREE_ITER_INTENT|
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS);
do {
ret = commit_do(&trans, NULL, NULL,
BTREE_INSERT_LAZY_RW|
BTREE_INSERT_NOFAIL,
check_inode(&trans, &iter, &prev, &s, full));
if (ret)
break;
} while (bch2_btree_iter_advance(&iter));
bch2_trans_iter_exit(&trans, &iter);
ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_inodes,
POS_MIN,
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
k,
NULL, NULL,
BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
check_inode(&trans, &iter, k, &prev, &s, full));
bch2_trans_exit(&trans);
snapshots_seen_exit(&s);
@ -1166,23 +1152,15 @@ static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
}
static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k,
struct inode_walker *inode,
struct snapshots_seen *s)
{
struct bch_fs *c = trans->c;
struct bkey_s_c k;
struct inode_walker_entry *i;
struct printbuf buf = PRINTBUF;
struct bpos equiv;
int ret = 0;
peek:
k = bch2_btree_iter_peek(iter);
if (!k.k)
goto out;
ret = bkey_err(k);
if (ret)
goto err;
ret = check_key_has_snapshot(trans, iter, k);
if (ret) {
@ -1212,7 +1190,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
* it shouldn't be but we need to fix the new i_sectors check
* code and delete the old bch2_count_inode_sectors() first
*/
goto peek;
return -EINTR;
}
#if 0
if (bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
@ -1324,6 +1302,7 @@ static int check_extents(struct bch_fs *c)
struct snapshots_seen s;
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;
#if 0
@ -1336,21 +1315,12 @@ static int check_extents(struct bch_fs *c)
bch_verbose(c, "checking extents");
bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_INTENT|
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS);
do {
ret = commit_do(&trans, NULL, NULL,
BTREE_INSERT_LAZY_RW|
BTREE_INSERT_NOFAIL,
check_extent(&trans, &iter, &w, &s));
if (ret)
break;
} while (bch2_btree_iter_advance(&iter));
bch2_trans_iter_exit(&trans, &iter);
ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_extents,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
NULL, NULL,
BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
check_extent(&trans, &iter, k, &w, &s));
#if 0
bch2_bkey_buf_exit(&prev, c);
#endif
@ -1522,26 +1492,18 @@ static int check_dirent_target(struct btree_trans *trans,
}
static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k,
struct bch_hash_info *hash_info,
struct inode_walker *dir,
struct inode_walker *target,
struct snapshots_seen *s)
{
struct bch_fs *c = trans->c;
struct bkey_s_c k;
struct bkey_s_c_dirent d;
struct inode_walker_entry *i;
struct printbuf buf = PRINTBUF;
struct bpos equiv;
int ret = 0;
peek:
k = bch2_btree_iter_peek(iter);
if (!k.k)
goto out;
ret = bkey_err(k);
if (ret)
goto err;
ret = check_key_has_snapshot(trans, iter, k);
if (ret) {
@ -1567,7 +1529,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
if (!iter->path->should_be_locked) {
/* hack: see check_extent() */
goto peek;
return -EINTR;
}
ret = __walk_inode(trans, dir, equiv);
@ -1715,6 +1677,7 @@ static int check_dirents(struct bch_fs *c)
struct bch_hash_info hash_info;
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;
bch_verbose(c, "checking dirents");
@ -1722,22 +1685,13 @@ static int check_dirents(struct bch_fs *c)
snapshots_seen_init(&s);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_dirents,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_INTENT|
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS);
do {
ret = commit_do(&trans, NULL, NULL,
BTREE_INSERT_LAZY_RW|
BTREE_INSERT_NOFAIL,
check_dirent(&trans, &iter, &hash_info,
&dir, &target, &s));
if (ret)
break;
} while (bch2_btree_iter_advance(&iter));
bch2_trans_iter_exit(&trans, &iter);
ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_dirents,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
k,
NULL, NULL,
BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
check_dirent(&trans, &iter, k, &hash_info, &dir, &target, &s));
bch2_trans_exit(&trans);
snapshots_seen_exit(&s);
@ -1750,21 +1704,13 @@ static int check_dirents(struct bch_fs *c)
}
static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k,
struct bch_hash_info *hash_info,
struct inode_walker *inode)
{
struct bch_fs *c = trans->c;
struct bkey_s_c k;
int ret;
k = bch2_btree_iter_peek(iter);
if (!k.k)
return 0;
ret = bkey_err(k);
if (ret)
return ret;
ret = check_key_has_snapshot(trans, iter, k);
if (ret)
return ret;
@ -1803,28 +1749,20 @@ static int check_xattrs(struct bch_fs *c)
struct bch_hash_info hash_info;
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;
bch_verbose(c, "checking xattrs");
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_INTENT|
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS);
do {
ret = commit_do(&trans, NULL, NULL,
BTREE_INSERT_LAZY_RW|
BTREE_INSERT_NOFAIL,
check_xattr(&trans, &iter, &hash_info,
&inode));
if (ret)
break;
} while (bch2_btree_iter_advance(&iter));
bch2_trans_iter_exit(&trans, &iter);
ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
k,
NULL, NULL,
BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
check_xattr(&trans, &iter, k, &hash_info, &inode));
bch2_trans_exit(&trans);

View file

@ -455,22 +455,14 @@ static void bch2_sb_quota_read(struct bch_fs *c)
}
static int bch2_fs_quota_read_inode(struct btree_trans *trans,
struct btree_iter *iter)
struct btree_iter *iter,
struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
struct bch_inode_unpacked u;
struct bch_subvolume subvolume;
struct bkey_s_c k;
int ret;
k = bch2_btree_iter_peek(iter);
ret = bkey_err(k);
if (ret)
return ret;
if (!k.k)
return 1;
ret = bch2_snapshot_get_subvol(trans, k.k->p.snapshot, &subvolume);
if (ret)
return ret;
@ -503,6 +495,7 @@ int bch2_fs_quota_read(struct bch_fs *c)
struct bch_memquota_type *q;
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
int ret;
mutex_lock(&c->sb_lock);
@ -517,18 +510,18 @@ int bch2_fs_quota_read(struct bch_fs *c)
bch2_trans_init(&trans, c, 0, 0);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_inodes, POS_MIN,
ret = for_each_btree_key2(&trans, iter, BTREE_ID_inodes,
POS_MIN,
BTREE_ITER_INTENT|
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS);
do {
ret = lockrestart_do(&trans,
bch2_fs_quota_read_inode(&trans, &iter));
} while (!ret);
bch2_trans_iter_exit(&trans, &iter);
BTREE_ITER_ALL_SNAPSHOTS,
k,
bch2_fs_quota_read_inode(&trans, &iter, k));
if (ret)
bch_err(c, "err reading inodes in quota init: %i", ret);
bch2_trans_exit(&trans);
return ret < 0 ? ret : 0;
return ret;
}
/* Enable/disable/delete quotas for an entire filesystem: */

View file

@ -131,7 +131,7 @@ static int snapshot_live(struct btree_trans *trans, u32 id)
if (!id)
return 0;
ret = lockrestart_do(trans, snapshot_lookup(trans, id, &v));
ret = snapshot_lookup(trans, id, &v);
if (ret == -ENOENT)
bch_err(trans->c, "snapshot node %u not found", id);
if (ret)
@ -140,15 +140,20 @@ static int snapshot_live(struct btree_trans *trans, u32 id)
return !BCH_SNAPSHOT_DELETED(&v);
}
static int bch2_snapshot_set_equiv(struct btree_trans *trans,
struct bkey_s_c_snapshot snap)
static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
unsigned i, nr_live = 0, live_idx = 0;
u32 id = snap.k->p.offset, child[2] = {
[0] = le32_to_cpu(snap.v->children[0]),
[1] = le32_to_cpu(snap.v->children[1])
};
struct bkey_s_c_snapshot snap;
u32 id = k.k->p.offset, child[2];
if (k.k->type != KEY_TYPE_snapshot)
return 0;
snap = bkey_s_c_to_snapshot(k);
child[0] = le32_to_cpu(snap.v->children[0]);
child[1] = le32_to_cpu(snap.v->children[1]);
for (i = 0; i < 2; i++) {
int ret = snapshot_live(trans, child[i]);
@ -166,58 +171,27 @@ static int bch2_snapshot_set_equiv(struct btree_trans *trans,
return 0;
}
static int bch2_snapshots_set_equiv(struct btree_trans *trans)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret;
for_each_btree_key(trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k, ret) {
if (k.k->type != KEY_TYPE_snapshot)
continue;
ret = bch2_snapshot_set_equiv(trans, bkey_s_c_to_snapshot(k));
if (ret)
break;
}
bch2_trans_iter_exit(trans, &iter);
if (ret)
bch_err(trans->c, "error in bch2_snapshots_set_equiv: %i", ret);
return ret;
}
/* fsck: */
static int check_snapshot(struct btree_trans *trans,
struct btree_iter *iter)
struct btree_iter *iter,
struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
struct bkey_s_c_snapshot s;
struct bch_subvolume subvol;
struct bch_snapshot v;
struct bkey_s_c k;
struct printbuf buf = PRINTBUF;
bool should_have_subvol;
u32 i, id;
int ret = 0;
k = bch2_btree_iter_peek(iter);
if (!k.k)
return 0;
ret = bkey_err(k);
if (ret)
return ret;
if (k.k->type != KEY_TYPE_snapshot)
return 0;
s = bkey_s_c_to_snapshot(k);
id = le32_to_cpu(s.v->parent);
if (id) {
ret = lockrestart_do(trans, snapshot_lookup(trans, id, &v));
ret = snapshot_lookup(trans, id, &v);
if (ret == -ENOENT)
bch_err(c, "snapshot with nonexistent parent:\n %s",
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf));
@ -236,7 +210,7 @@ static int check_snapshot(struct btree_trans *trans,
for (i = 0; i < 2 && s.v->children[i]; i++) {
id = le32_to_cpu(s.v->children[i]);
ret = lockrestart_do(trans, snapshot_lookup(trans, id, &v));
ret = snapshot_lookup(trans, id, &v);
if (ret == -ENOENT)
bch_err(c, "snapshot node %llu has nonexistent child %u",
s.k->p.offset, id);
@ -256,7 +230,7 @@ static int check_snapshot(struct btree_trans *trans,
if (should_have_subvol) {
id = le32_to_cpu(s.v->subvol);
ret = lockrestart_do(trans, bch2_subvolume_get(trans, id, 0, false, &subvol));
ret = bch2_subvolume_get(trans, id, 0, false, &subvol);
if (ret == -ENOENT)
bch_err(c, "snapshot points to nonexistent subvolume:\n %s",
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf));
@ -298,22 +272,16 @@ int bch2_fs_check_snapshots(struct bch_fs *c)
{
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
int ret;
bch2_trans_init(&trans, c, 0, 0);
bch2_trans_iter_init(&trans, &iter, BTREE_ID_snapshots,
POS_MIN, BTREE_ITER_PREFETCH);
do {
ret = commit_do(&trans, NULL, NULL,
BTREE_INSERT_LAZY_RW|
BTREE_INSERT_NOFAIL,
check_snapshot(&trans, &iter));
if (ret)
break;
} while (bch2_btree_iter_advance(&iter));
bch2_trans_iter_exit(&trans, &iter);
ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_snapshots,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_PREFETCH, k,
NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
check_snapshot(&trans, &iter, k));
if (ret)
bch_err(c, "error %i checking snapshots", ret);
@ -404,20 +372,10 @@ int bch2_fs_snapshots_start(struct bch_fs *c)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k, ret) {
if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0)
break;
if (k.k->type != KEY_TYPE_snapshot)
continue;
ret = bch2_mark_snapshot(&trans, bkey_s_c_null, k, 0) ?:
bch2_snapshot_set_equiv(&trans, bkey_s_c_to_snapshot(k));
if (ret)
break;
}
bch2_trans_iter_exit(&trans, &iter);
for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k,
bch2_mark_snapshot(&trans, bkey_s_c_null, k, 0) ?:
bch2_snapshot_set_equiv(&trans, k));
bch2_trans_exit(&trans);
@ -692,6 +650,34 @@ static int bch2_snapshot_delete_keys_btree(struct btree_trans *trans,
return ret;
}
static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k)
{
struct bkey_s_c_snapshot snap;
u32 children[2];
int ret;
if (k.k->type != KEY_TYPE_snapshot)
return 0;
snap = bkey_s_c_to_snapshot(k);
if (BCH_SNAPSHOT_DELETED(snap.v) ||
BCH_SNAPSHOT_SUBVOL(snap.v))
return 0;
children[0] = le32_to_cpu(snap.v->children[0]);
children[1] = le32_to_cpu(snap.v->children[1]);
ret = snapshot_live(trans, children[0]) ?:
snapshot_live(trans, children[1]);
if (ret < 0)
return ret;
if (!ret)
return bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
return 0;
}
int bch2_delete_dead_snapshots(struct bch_fs *c)
{
struct btree_trans trans;
@ -699,7 +685,7 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
struct bkey_s_c k;
struct bkey_s_c_snapshot snap;
snapshot_id_list deleted = { 0 };
u32 i, id, children[2];
u32 i, id;
int ret = 0;
if (!test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
@ -719,43 +705,22 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
* For every snapshot node: If we have no live children and it's not
* pointed to by a subvolume, delete it:
*/
for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k, ret) {
if (k.k->type != KEY_TYPE_snapshot)
continue;
snap = bkey_s_c_to_snapshot(k);
if (BCH_SNAPSHOT_DELETED(snap.v) ||
BCH_SNAPSHOT_SUBVOL(snap.v))
continue;
children[0] = le32_to_cpu(snap.v->children[0]);
children[1] = le32_to_cpu(snap.v->children[1]);
ret = snapshot_live(&trans, children[0]) ?:
snapshot_live(&trans, children[1]);
if (ret < 0)
break;
if (ret)
continue;
ret = commit_do(&trans, NULL, NULL, 0,
bch2_snapshot_node_set_deleted(&trans, iter.pos.offset));
if (ret) {
bch_err(c, "error deleting snapshot %llu: %i", iter.pos.offset, ret);
break;
}
}
bch2_trans_iter_exit(&trans, &iter);
ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k,
NULL, NULL, 0,
bch2_delete_redundant_snapshot(&trans, &iter, k));
if (ret) {
bch_err(c, "error walking snapshots: %i", ret);
bch_err(c, "error deleting redundant snapshots: %i", ret);
goto err;
}
ret = bch2_snapshots_set_equiv(&trans);
if (ret)
for_each_btree_key2(&trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k,
bch2_snapshot_set_equiv(&trans, k));
if (ret) {
bch_err(c, "error in bch2_snapshots_set_equiv: %i", ret);
goto err;
}
for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k, ret) {