bcachefs: Convert bch2_gc_done() to for_each_btree_key2()

This converts bch2_gc_stripes_done() and bch2_gc_reflink_done() to the
new for_each_btree_key_commit() macro.

The new for_each_btree_key2() and for_each_btree_key_commit() macros
handle transaction retries, letting us avoid nested transactions -
something we want to avoid since they're tricky to do completely
correctly, and upcoming assertions are going to be checking for that.
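As a rough sketch of the shape of the conversion (names are taken from the
call sites in this patch; fixup_one_key() is a hypothetical callback standing
in for helpers like bch2_gc_write_reflink_key(), and the macro internals are
not shown):

	/* Before: open-coded loop, with a nested transaction commit per key. */
	for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		ret = commit_do(&trans, NULL, NULL, 0,
				fixup_one_key(&trans, &iter, k)); /* hypothetical helper */
		if (ret)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);

	/* After: one macro owns iteration, the commit, and retrying on restart. */
	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_reflink, POS_MIN,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_NOFAIL,
		fixup_one_key(&trans, &iter, k)); /* hypothetical helper */

The per-key work (including queuing updates with bch2_trans_update()) moves
into the callback, and the macro takes care of committing after each key.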

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Kent Overstreet, 2022-07-17 00:44:19 -04:00 (committed by Kent Overstreet)
parent eace11a730
commit 326568f18c
1 changed file with 120 additions and 118 deletions

@@ -1321,21 +1321,19 @@ static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
static int bch2_alloc_write_key(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k,
bool metadata_only)
{
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
struct bucket gc, *b;
struct bkey_s_c k;
struct bkey_i_alloc_v4 *a;
struct bch_alloc_v4 old, new;
enum bch_data_type type;
int ret;
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
return ret;
if (bkey_cmp(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
return 1;
bch2_alloc_to_v4(k, &old);
new = old;
@@ -1428,23 +1426,13 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
bch2_trans_init(&trans, c, 0, 0);
for_each_member_device(ca, c, i) {
for_each_btree_key(&trans, iter, BTREE_ID_alloc,
POS(ca->dev_idx, ca->mi.first_bucket),
BTREE_ITER_SLOTS|
BTREE_ITER_PREFETCH, k, ret) {
if (bkey_cmp(iter.pos, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
break;
ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
POS(ca->dev_idx, ca->mi.first_bucket),
BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
NULL, NULL, BTREE_INSERT_LAZY_RW,
bch2_alloc_write_key(&trans, &iter, k, metadata_only));
ret = commit_do(&trans, NULL, NULL,
BTREE_INSERT_LAZY_RW,
bch2_alloc_write_key(&trans, &iter,
metadata_only));
if (ret)
break;
}
bch2_trans_iter_exit(&trans, &iter);
if (ret) {
if (ret < 0) {
bch_err(c, "error writing alloc info: %i", ret);
percpu_ref_put(&ca->ref);
break;
@@ -1452,7 +1440,7 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
}
bch2_trans_exit(&trans);
return ret;
return ret < 0 ? ret : 0;
}
static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
@@ -1536,14 +1524,64 @@ static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
};
}
static int bch2_gc_write_reflink_key(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k,
size_t *idx)
{
struct bch_fs *c = trans->c;
const __le64 *refcount = bkey_refcount_c(k);
struct printbuf buf = PRINTBUF;
struct reflink_gc *r;
int ret = 0;
if (!refcount)
return 0;
while ((r = genradix_ptr(&c->reflink_gc_table, *idx)) &&
r->offset < k.k->p.offset)
++*idx;
if (!r ||
r->offset != k.k->p.offset ||
r->size != k.k->size) {
bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
return -EINVAL;
}
if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
"reflink key has wrong refcount:\n"
" %s\n"
" should be %u",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
r->refcount)) {
struct bkey_i *new;
new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
ret = PTR_ERR_OR_ZERO(new);
if (ret)
return ret;
bkey_reassemble(new, k);
if (!r->refcount)
new->k.type = KEY_TYPE_deleted;
else
*bkey_refcount(new) = cpu_to_le64(r->refcount);
ret = bch2_trans_update(trans, iter, new, 0);
}
fsck_err:
printbuf_exit(&buf);
return ret;
}
static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
{
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
struct reflink_gc *r;
size_t idx = 0;
struct printbuf buf = PRINTBUF;
int ret = 0;
if (metadata_only)
@@ -1551,57 +1589,14 @@ static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
const __le64 *refcount = bkey_refcount_c(k);
ret = for_each_btree_key_commit(&trans, iter,
BTREE_ID_reflink, POS_MIN,
BTREE_ITER_PREFETCH, k,
NULL, NULL, BTREE_INSERT_NOFAIL,
bch2_gc_write_reflink_key(&trans, &iter, k, &idx));
if (!refcount)
continue;
r = genradix_ptr(&c->reflink_gc_table, idx++);
if (!r ||
r->offset != k.k->p.offset ||
r->size != k.k->size) {
bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
ret = -EINVAL;
break;
}
if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
"reflink key has wrong refcount:\n"
" %s\n"
" should be %u",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf),
r->refcount)) {
struct bkey_i *new;
new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
if (!new) {
ret = -ENOMEM;
break;
}
bkey_reassemble(new, k);
if (!r->refcount)
new->k.type = KEY_TYPE_deleted;
else
*bkey_refcount(new) = cpu_to_le64(r->refcount);
ret = commit_do(&trans, NULL, NULL, 0,
__bch2_btree_insert(&trans, BTREE_ID_reflink, new));
kfree(new);
if (ret)
break;
}
}
fsck_err:
bch2_trans_iter_exit(&trans, &iter);
c->reflink_gc_nr = 0;
bch2_trans_exit(&trans);
printbuf_exit(&buf);
return ret;
}
@@ -1653,15 +1648,59 @@ static void bch2_gc_reflink_reset(struct bch_fs *c, bool metadata_only)
r->refcount = 0;
}
static int bch2_gc_write_stripes_key(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
const struct bch_stripe *s;
struct gc_stripe *m;
unsigned i;
int ret = 0;
if (k.k->type != KEY_TYPE_stripe)
return 0;
s = bkey_s_c_to_stripe(k).v;
m = genradix_ptr(&c->gc_stripes, k.k->p.offset);
for (i = 0; i < s->nr_blocks; i++)
if (stripe_blockcount_get(s, i) != (m ? m->block_sectors[i] : 0))
goto inconsistent;
return 0;
inconsistent:
if (fsck_err_on(true, c,
"stripe has wrong block sector count %u:\n"
" %s\n"
" should be %u", i,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf),
m ? m->block_sectors[i] : 0)) {
struct bkey_i_stripe *new;
new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
ret = PTR_ERR_OR_ZERO(new);
if (ret)
return ret;
bkey_reassemble(&new->k_i, k);
for (i = 0; i < new->v.nr_blocks; i++)
stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
ret = bch2_trans_update(trans, iter, &new->k_i, 0);
}
fsck_err:
printbuf_exit(&buf);
return ret;
}
static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
{
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
struct gc_stripe *m;
const struct bch_stripe *s;
struct printbuf buf = PRINTBUF;
unsigned i;
int ret = 0;
if (metadata_only)
@@ -1669,50 +1708,13 @@ static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_stripes, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
if (k.k->type != KEY_TYPE_stripe)
continue;
s = bkey_s_c_to_stripe(k).v;
m = genradix_ptr(&c->gc_stripes, k.k->p.offset);
for (i = 0; i < s->nr_blocks; i++)
if (stripe_blockcount_get(s, i) != (m ? m->block_sectors[i] : 0))
goto inconsistent;
continue;
inconsistent:
if (fsck_err_on(true, c,
"stripe has wrong block sector count %u:\n"
" %s\n"
" should be %u", i,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf),
m ? m->block_sectors[i] : 0)) {
struct bkey_i_stripe *new;
new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
if (!new) {
ret = -ENOMEM;
break;
}
bkey_reassemble(&new->k_i, k);
for (i = 0; i < new->v.nr_blocks; i++)
stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
ret = commit_do(&trans, NULL, NULL, 0,
__bch2_btree_insert(&trans, BTREE_ID_reflink, &new->k_i));
kfree(new);
}
}
fsck_err:
bch2_trans_iter_exit(&trans, &iter);
ret = for_each_btree_key_commit(&trans, iter,
BTREE_ID_stripes, POS_MIN,
BTREE_ITER_PREFETCH, k,
NULL, NULL, BTREE_INSERT_NOFAIL,
bch2_gc_write_stripes_key(&trans, &iter, k));
bch2_trans_exit(&trans);
printbuf_exit(&buf);
return ret;
}