bcachefs: Split out struct gc_stripe from struct stripe

We have two radix trees of stripes - one that mirrors some information
from the stripes btree in normal operation, and another that GC uses to
recalculate block usage counts.

The normal one is now only used for finding partially empty stripes in
order to reuse them - the normal stripes radix tree and the GC stripes
radix tree are used significantly differently, so this patch splits them
into separate types.

In an upcoming patch we'll be replacing c->stripes with a btree that
indexes stripes by the order we want to reuse them.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
This commit is contained in:
Kent Overstreet 2021-12-04 23:07:33 -05:00 committed by Kent Overstreet
parent f54788cc8c
commit 990d42d187
7 changed files with 180 additions and 219 deletions

View File

@ -826,7 +826,8 @@ mempool_t bio_bounce_pages;
struct mutex data_progress_lock;
/* STRIPES: */
GENRADIX(struct stripe) stripes[2];
GENRADIX(struct stripe) stripes;
GENRADIX(struct gc_stripe) gc_stripes;
ec_stripes_heap ec_stripes_heap;
spinlock_t ec_stripes_heap_lock;

View File

@ -597,7 +597,7 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
}
if (p.has_ec) {
struct stripe *m = genradix_ptr(&c->stripes[true], p.ec.idx);
struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
if (fsck_err_on(!m || !m->alive, c,
"pointer to nonexistent stripe %llu\n"
@ -665,7 +665,7 @@ again:
ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
bkey_extent_entry_for_each(ptrs, entry) {
if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
struct stripe *m = genradix_ptr(&c->stripes[true],
struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
entry->stripe_ptr.idx);
union bch_extent_entry *next_ptr;
@ -1132,7 +1132,8 @@ static void bch2_gc_free(struct bch_fs *c)
struct bch_dev *ca;
unsigned i;
genradix_free(&c->stripes[1]);
genradix_free(&c->reflink_gc_table);
genradix_free(&c->gc_stripes);
for_each_member_device(ca, c, i) {
kvpfree(rcu_dereference_protected(ca->buckets[1], 1),
@ -1191,35 +1192,6 @@ static int bch2_gc_done(struct bch_fs *c,
#define copy_fs_field(_f, _msg, ...) \
copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
if (!metadata_only) {
struct genradix_iter iter = genradix_iter_init(&c->stripes[1], 0);
struct stripe *dst, *src;
while ((src = genradix_iter_peek(&iter, &c->stripes[1]))) {
dst = genradix_ptr_alloc(&c->stripes[0], iter.pos, GFP_KERNEL);
if (dst->alive != src->alive ||
dst->sectors != src->sectors ||
dst->algorithm != src->algorithm ||
dst->nr_blocks != src->nr_blocks ||
dst->nr_redundant != src->nr_redundant) {
bch_err(c, "unexpected stripe inconsistency at bch2_gc_done, confused");
ret = -EINVAL;
goto fsck_err;
}
for (i = 0; i < ARRAY_SIZE(dst->block_sectors); i++)
copy_stripe_field(block_sectors[i],
"block_sectors[%u]", i);
dst->blocks_nonempty = 0;
for (i = 0; i < dst->nr_blocks; i++)
dst->blocks_nonempty += dst->block_sectors[i] != 0;
genradix_iter_advance(&iter, &c->stripes[1]);
}
}
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
bch2_fs_usage_acc_to_base(c, i);
@ -1510,12 +1482,82 @@ static int bch2_gc_reflink_done(struct bch_fs *c, bool initial,
fsck_err:
bch2_trans_iter_exit(&trans, &iter);
out:
genradix_free(&c->reflink_gc_table);
c->reflink_gc_nr = 0;
bch2_trans_exit(&trans);
return ret;
}
/*
 * Per-key callback for the initial-GC walk of the stripes btree: compare the
 * on-disk per-block sector counts in each stripe key against the counts GC
 * recalculated into c->gc_stripes, and rewrite the key (via the journal) when
 * they disagree.
 *
 * Returns 0 when the key matches (or isn't a stripe key), -ENOMEM on
 * allocation failure, or an error from bch2_journal_key_insert().
 */
static int bch2_gc_stripes_done_initial_fn(struct btree_trans *trans,
					   struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct gc_stripe *m;
	const struct bch_stripe *s;
	char buf[200];
	unsigned i;
	int ret = 0;

	/* Only stripe keys carry block sector counts: */
	if (k.k->type != KEY_TYPE_stripe)
		return 0;

	s = bkey_s_c_to_stripe(k).v;
	m = genradix_ptr(&c->gc_stripes, k.k->p.offset);

	/*
	 * m may be NULL if GC saw no pointers to this stripe, in which case
	 * the expected count for every block is 0:
	 */
	for (i = 0; i < s->nr_blocks; i++)
		if (stripe_blockcount_get(s, i) != (m ? m->block_sectors[i] : 0))
			goto inconsistent;
	return 0;
inconsistent:
	/*
	 * fsck_err_on() reports the inconsistency and returns true when we
	 * should repair; it may jump to the fsck_err label below on its own.
	 */
	if (fsck_err_on(true, c,
			"stripe has wrong block sector count %u:\n"
			" %s\n"
			" should be %u", i,
			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
			m ? m->block_sectors[i] : 0)) {
		struct bkey_i_stripe *new;

		new = kmalloc(bkey_bytes(k.k), GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto fsck_err;
		}

		/* Copy the existing key, then overwrite the block counts: */
		bkey_reassemble(&new->k_i, k);

		for (i = 0; i < new->v.nr_blocks; i++)
			stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);

		ret = bch2_journal_key_insert(c, BTREE_ID_stripes, 0, &new->k_i);
		if (ret)
			/*
			 * NOTE(review): freed only on failure — presumably
			 * bch2_journal_key_insert() takes ownership of the key
			 * on success; confirm against its definition.
			 */
			kfree(new);
	}
fsck_err:
	return ret;
}
/*
 * Reconcile the stripes btree with GC's recalculated block counts.  A no-op
 * for metadata-only GC; only the initial (recovery-time) path is implemented,
 * so runtime GC reaching here is a bug.
 */
static int bch2_gc_stripes_done(struct bch_fs *c, bool initial,
				bool metadata_only)
{
	struct btree_trans trans;
	int ret = 0;

	if (metadata_only)
		return 0;

	bch2_trans_init(&trans, c, 0, 0);

	/* Runtime GC doesn't maintain gc_stripes yet: */
	if (!initial)
		BUG();

	ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_stripes,
					  bch2_gc_stripes_done_initial_fn);

	bch2_trans_exit(&trans);

	return ret;
}
static int bch2_gc_reflink_start_initial_fn(struct btree_trans *trans,
struct bkey_s_c k)
{
@ -1551,7 +1593,6 @@ static int bch2_gc_reflink_start(struct bch_fs *c, bool initial,
return 0;
bch2_trans_init(&trans, c, 0, 0);
genradix_free(&c->reflink_gc_table);
c->reflink_gc_nr = 0;
if (initial) {
@ -1685,6 +1726,7 @@ out:
percpu_down_write(&c->mark_lock);
ret = bch2_gc_reflink_done(c, initial, metadata_only) ?:
bch2_gc_stripes_done(c, initial, metadata_only) ?:
bch2_gc_done(c, initial, metadata_only);
bch2_journal_unblock(&c->journal);

View File

@ -953,39 +953,34 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
bool gc = flags & BTREE_TRIGGER_GC;
struct bch_fs *c = trans->c;
struct bch_replicas_padded r;
struct stripe *m;
unsigned i, blocks_nonempty = 0;
m = genradix_ptr(&c->stripes[gc], p.idx);
if (!gc) {
BUG();
} else {
struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
spin_lock(&c->ec_stripes_heap_lock);
if (!m)
return -ENOMEM;
if (!m || !m->alive) {
spin_lock(&c->ec_stripes_heap_lock);
if (!m || !m->alive) {
spin_unlock(&c->ec_stripes_heap_lock);
bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
(u64) p.idx);
bch2_inconsistent_error(c);
return -EIO;
}
m->block_sectors[p.block] += sectors;
r = m->r;
spin_unlock(&c->ec_stripes_heap_lock);
bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
(u64) p.idx);
bch2_inconsistent_error(c);
return -EIO;
r.e.data_type = data_type;
update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, gc);
}
m->block_sectors[p.block] += sectors;
r = m->r;
for (i = 0; i < m->nr_blocks; i++)
blocks_nonempty += m->block_sectors[i] != 0;
if (m->blocks_nonempty != blocks_nonempty) {
m->blocks_nonempty = blocks_nonempty;
if (!gc)
bch2_stripes_heap_update(c, m, p.idx);
}
spin_unlock(&c->ec_stripes_heap_lock);
r.e.data_type = data_type;
update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, gc);
return 0;
}
@ -1081,67 +1076,69 @@ static int bch2_mark_stripe(struct btree_trans *trans,
? bkey_s_c_to_stripe(old).v : NULL;
const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
? bkey_s_c_to_stripe(new).v : NULL;
struct stripe *m = genradix_ptr(&c->stripes[gc], idx);
unsigned i;
int ret;
BUG_ON(gc && old_s);
if (!m || (old_s && !m->alive)) {
char buf1[200], buf2[200];
if (!gc) {
struct stripe *m = genradix_ptr(&c->stripes, idx);
bch2_bkey_val_to_text(&PBUF(buf1), c, old);
bch2_bkey_val_to_text(&PBUF(buf2), c, new);
bch_err_ratelimited(c, "error marking nonexistent stripe %zu while marking\n"
"old %s\n"
"new %s", idx, buf1, buf2);
bch2_inconsistent_error(c);
return -1;
}
if (!m || (old_s && !m->alive)) {
char buf1[200], buf2[200];
if (!new_s) {
spin_lock(&c->ec_stripes_heap_lock);
bch2_stripes_heap_del(c, m, idx);
spin_unlock(&c->ec_stripes_heap_lock);
memset(m, 0, sizeof(*m));
} else {
m->alive = true;
m->sectors = le16_to_cpu(new_s->sectors);
m->algorithm = new_s->algorithm;
m->nr_blocks = new_s->nr_blocks;
m->nr_redundant = new_s->nr_redundant;
m->blocks_nonempty = 0;
for (i = 0; i < new_s->nr_blocks; i++) {
m->block_sectors[i] =
stripe_blockcount_get(new_s, i);
m->blocks_nonempty += !!m->block_sectors[i];
m->ptrs[i] = new_s->ptrs[i];
bch2_bkey_val_to_text(&PBUF(buf1), c, old);
bch2_bkey_val_to_text(&PBUF(buf2), c, new);
bch_err_ratelimited(c, "error marking nonexistent stripe %zu while marking\n"
"old %s\n"
"new %s", idx, buf1, buf2);
bch2_inconsistent_error(c);
return -1;
}
bch2_bkey_to_replicas(&m->r.e, new);
if (!new_s) {
spin_lock(&c->ec_stripes_heap_lock);
bch2_stripes_heap_del(c, m, idx);
spin_unlock(&c->ec_stripes_heap_lock);
memset(m, 0, sizeof(*m));
} else {
m->alive = true;
m->sectors = le16_to_cpu(new_s->sectors);
m->algorithm = new_s->algorithm;
m->nr_blocks = new_s->nr_blocks;
m->nr_redundant = new_s->nr_redundant;
m->blocks_nonempty = 0;
for (i = 0; i < new_s->nr_blocks; i++)
m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
if (!gc) {
spin_lock(&c->ec_stripes_heap_lock);
bch2_stripes_heap_update(c, m, idx);
spin_unlock(&c->ec_stripes_heap_lock);
}
}
} else {
struct gc_stripe *m = genradix_ptr(&c->gc_stripes, idx);
if (gc) {
/*
* This will be wrong when we bring back runtime gc: we should
* be unmarking the old key and then marking the new key
*/
m->alive = true;
m->sectors = le16_to_cpu(new_s->sectors);
m->nr_blocks = new_s->nr_blocks;
m->nr_redundant = new_s->nr_redundant;
for (i = 0; i < new_s->nr_blocks; i++)
m->ptrs[i] = new_s->ptrs[i];
bch2_bkey_to_replicas(&m->r.e, new);
/*
* gc recalculates this field from stripe ptr
* references:
*/
memset(m->block_sectors, 0, sizeof(m->block_sectors));
m->blocks_nonempty = 0;
for (i = 0; i < new_s->nr_blocks; i++) {
ret = mark_stripe_bucket(trans, new, i, journal_seq, flags);
@ -1602,6 +1599,7 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
stripe_blockcount_set(&s->v, p.ec.block,
stripe_blockcount_get(&s->v, p.ec.block) +
sectors);
ret = bch2_trans_update(trans, &iter, &s->k_i, 0);
if (ret)
goto err;

View File

@ -545,11 +545,11 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
free_heap(&n);
}
if (!genradix_ptr_alloc(&c->stripes[0], idx, gfp))
if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
return -ENOMEM;
if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
!genradix_ptr_alloc(&c->stripes[1], idx, gfp))
!genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
return -ENOMEM;
return 0;
@ -594,13 +594,13 @@ static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
{
struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);
genradix_ptr(&c->stripes[0], h->data[i].idx)->heap_idx = i;
genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
}
static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
{
ec_stripes_heap *h = &c->ec_stripes_heap;
struct stripe *m = genradix_ptr(&c->stripes[0], idx);
struct stripe *m = genradix_ptr(&c->stripes, idx);
BUG_ON(!m->alive);
BUG_ON(m->heap_idx >= h->used);
@ -692,7 +692,7 @@ static void ec_stripe_delete_work(struct work_struct *work)
break;
}
bch2_stripes_heap_del(c, genradix_ptr(&c->stripes[0], idx), idx);
bch2_stripes_heap_del(c, genradix_ptr(&c->stripes, idx), idx);
spin_unlock(&c->ec_stripes_heap_lock);
if (ec_stripe_delete(c, idx))
@ -702,22 +702,18 @@ static void ec_stripe_delete_work(struct work_struct *work)
/* stripe creation: */
static int ec_stripe_bkey_insert(struct bch_fs *c,
static int ec_stripe_bkey_insert(struct btree_trans *trans,
struct bkey_i_stripe *stripe,
struct disk_reservation *res)
{
struct btree_trans trans;
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
struct bpos min_pos = POS(0, 1);
struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
int ret;
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
for_each_btree_key(&trans, iter, BTREE_ID_stripes, start_pos,
for_each_btree_key(trans, iter, BTREE_ID_stripes, start_pos,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
if (start_pos.offset) {
@ -738,29 +734,24 @@ retry:
found_slot:
start_pos = iter.pos;
ret = ec_stripe_mem_alloc(&trans, &iter);
ret = ec_stripe_mem_alloc(trans, &iter);
if (ret)
goto err;
stripe->k.p = iter.pos;
ret = bch2_trans_update(&trans, &iter, &stripe->k_i, 0) ?:
bch2_trans_commit(&trans, res, NULL,
BTREE_INSERT_NOFAIL);
ret = bch2_trans_update(trans, &iter, &stripe->k_i, 0);
c->ec_stripe_hint = start_pos.offset;
err:
bch2_trans_iter_exit(&trans, &iter);
if (ret == -EINTR)
goto retry;
c->ec_stripe_hint = ret ? start_pos.offset : start_pos.offset + 1;
bch2_trans_exit(&trans);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
static int ec_stripe_bkey_update(struct btree_trans *trans,
struct bkey_i_stripe *new)
struct bkey_i_stripe *new,
struct disk_reservation *res)
{
struct btree_iter iter;
struct bkey_s_c k;
@ -947,10 +938,10 @@ static void ec_stripe_create(struct ec_stripe_new *s)
goto err_put_writes;
}
ret = s->have_existing_stripe
? bch2_trans_do(c, &s->res, NULL, BTREE_INSERT_NOFAIL,
ec_stripe_bkey_update(&trans, &s->new_stripe.key))
: ec_stripe_bkey_insert(c, &s->new_stripe.key, &s->res);
ret = bch2_trans_do(c, &s->res, NULL, BTREE_INSERT_NOFAIL,
s->have_existing_stripe
? ec_stripe_bkey_update(&trans, &s->new_stripe.key, &s->res)
: ec_stripe_bkey_insert(&trans, &s->new_stripe.key, &s->res));
if (ret) {
bch_err(c, "error creating stripe: error creating stripe key");
goto err_put_writes;
@ -965,7 +956,7 @@ static void ec_stripe_create(struct ec_stripe_new *s)
}
spin_lock(&c->ec_stripes_heap_lock);
m = genradix_ptr(&c->stripes[0], s->new_stripe.key.k.p.offset);
m = genradix_ptr(&c->stripes, s->new_stripe.key.k.p.offset);
BUG_ON(m->on_heap);
bch2_stripes_heap_insert(c, m, s->new_stripe.key.k.p.offset);
@ -1381,7 +1372,7 @@ static s64 get_existing_stripe(struct bch_fs *c,
continue;
stripe_idx = h->data[heap_idx].idx;
m = genradix_ptr(&c->stripes[0], stripe_idx);
m = genradix_ptr(&c->stripes, stripe_idx);
if (m->algorithm == head->algo &&
m->nr_redundant == head->redundancy &&
@ -1555,85 +1546,11 @@ void bch2_stripes_heap_start(struct bch_fs *c)
struct genradix_iter iter;
struct stripe *m;
genradix_for_each(&c->stripes[0], iter, m)
genradix_for_each(&c->stripes, iter, m)
if (m->alive)
bch2_stripes_heap_insert(c, m, iter.pos);
}
/*
 * If the in-memory stripe @m's per-block sector counts disagree with the
 * btree key at stripe index @idx, queue a btree update rewriting the key
 * with the in-memory counts.  @new_key is caller-provided scratch space for
 * the reassembled key.
 *
 * Returns 0 on success (including "no update needed"), -EIO if the slot
 * doesn't hold a stripe key, or an error from the iterator/update.
 */
static int __bch2_stripe_write_key(struct btree_trans *trans,
				   struct btree_iter *iter,
				   struct stripe *m,
				   size_t idx,
				   struct bkey_i_stripe *new_key)
{
	const struct bch_stripe *v;
	struct bkey_s_c k;
	unsigned i;
	int ret;

	/* Position the iterator on the stripe index being checked: */
	bch2_btree_iter_set_pos(iter, POS(0, idx));

	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (k.k->type != KEY_TYPE_stripe)
		return -EIO;

	v = bkey_s_c_to_stripe(k).v;
	/* Skip the write entirely when every block count already matches: */
	for (i = 0; i < v->nr_blocks; i++)
		if (m->block_sectors[i] != stripe_blockcount_get(v, i))
			goto write;
	return 0;
write:
	/* Copy the existing key, then overwrite the block sector counts: */
	bkey_reassemble(&new_key->k_i, k);

	for (i = 0; i < new_key->v.nr_blocks; i++)
		stripe_blockcount_set(&new_key->v, i,
				      m->block_sectors[i]);

	return bch2_trans_update(trans, iter, &new_key->k_i, 0);
}
/*
 * Write back the in-memory block sector counts of every live stripe to the
 * stripes btree, one transactional update per stripe.
 *
 * @flags: extra BTREE_INSERT_* flags for the commits.
 * Returns 0 on success or the first transaction error encountered.
 */
int bch2_stripes_write(struct bch_fs *c, unsigned flags)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct genradix_iter giter;
	struct bkey_i_stripe *new_key;
	struct stripe *m;
	int ret = 0;

	/*
	 * Scratch buffer large enough for any stripe key — presumably
	 * 255 u64s is the maximum bkey size; TODO confirm.  Allocation
	 * failure here is treated as fatal rather than returned.
	 */
	new_key = kmalloc(255 * sizeof(u64), GFP_KERNEL);
	BUG_ON(!new_key);

	bch2_trans_init(&trans, c, 0, 0);
	bch2_trans_iter_init(&trans, &iter, BTREE_ID_stripes, POS_MIN,
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	genradix_for_each(&c->stripes[0], giter, m) {
		/* Only stripes with a corresponding btree key are written: */
		if (!m->alive)
			continue;

		/* __bch2_trans_do retries the update on transaction restart: */
		ret = __bch2_trans_do(&trans, NULL, NULL,
				      BTREE_INSERT_NOFAIL|flags,
			__bch2_stripe_write_key(&trans, &iter, m,
					giter.pos, new_key));

		if (ret)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);

	kfree(new_key);

	return ret;
}
static int bch2_stripes_read_fn(struct btree_trans *trans, struct bkey_s_c k)
{
const struct bch_stripe *s;
@ -1651,7 +1568,7 @@ static int bch2_stripes_read_fn(struct btree_trans *trans, struct bkey_s_c k)
s = bkey_s_c_to_stripe(k).v;
m = genradix_ptr(&c->stripes[0], k.k->p.offset);
m = genradix_ptr(&c->stripes, k.k->p.offset);
m->alive = true;
m->sectors = le16_to_cpu(s->sectors);
m->algorithm = s->algorithm;
@ -1659,14 +1576,8 @@ static int bch2_stripes_read_fn(struct btree_trans *trans, struct bkey_s_c k)
m->nr_redundant = s->nr_redundant;
m->blocks_nonempty = 0;
for (i = 0; i < s->nr_blocks; i++) {
m->block_sectors[i] =
stripe_blockcount_get(s, i);
m->blocks_nonempty += !!m->block_sectors[i];
m->ptrs[i] = s->ptrs[i];
}
bch2_bkey_to_replicas(&m->r.e, k);
for (i = 0; i < s->nr_blocks; i++)
m->blocks_nonempty += !!stripe_blockcount_get(s, i);
spin_lock(&c->ec_stripes_heap_lock);
bch2_stripes_heap_update(c, m, k.k->p.offset);
@ -1722,7 +1633,9 @@ int bch2_ec_mem_alloc(struct bch_fs *c, bool gc)
ret = genradix_prealloc(&c->stripes[gc], idx, GFP_KERNEL);
#else
for (i = 0; i < idx; i++)
if (!genradix_ptr_alloc(&c->stripes[gc], i, GFP_KERNEL))
if (!gc
? !genradix_ptr_alloc(&c->stripes, i, GFP_KERNEL)
: !genradix_ptr_alloc(&c->gc_stripes, i, GFP_KERNEL))
return -ENOMEM;
#endif
return 0;
@ -1736,7 +1649,7 @@ void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
spin_lock(&c->ec_stripes_heap_lock);
for (i = 0; i < min_t(size_t, h->used, 20); i++) {
m = genradix_ptr(&c->stripes[0], h->data[i].idx);
m = genradix_ptr(&c->stripes, h->data[i].idx);
pr_buf(out, "%zu %u/%u+%u\n", h->data[i].idx,
h->data[i].blocks_nonempty,
@ -1794,7 +1707,7 @@ void bch2_fs_ec_exit(struct bch_fs *c)
BUG_ON(!list_empty(&c->ec_stripe_new_list));
free_heap(&c->ec_stripes_heap);
genradix_free(&c->stripes[0]);
genradix_free(&c->stripes);
bioset_exit(&c->ec_bioset);
}

View File

@ -108,7 +108,7 @@ static inline bool bch2_ptr_matches_stripe(const struct bch_stripe *s,
le16_to_cpu(s->sectors));
}
static inline bool bch2_ptr_matches_stripe_m(const struct stripe *m,
static inline bool bch2_ptr_matches_stripe_m(const struct gc_stripe *m,
struct extent_ptr_decoded p)
{
unsigned nr_data = m->nr_blocks - m->nr_redundant;
@ -216,7 +216,6 @@ void bch2_ec_flush_new_stripes(struct bch_fs *);
void bch2_stripes_heap_start(struct bch_fs *);
int bch2_stripes_read(struct bch_fs *);
int bch2_stripes_write(struct bch_fs *, unsigned);
int bch2_ec_mem_alloc(struct bch_fs *, bool);

View File

@ -21,6 +21,15 @@ struct stripe {
unsigned alive:1; /* does a corresponding key exist in stripes btree? */
unsigned on_heap:1;
u8 blocks_nonempty;
};
struct gc_stripe {
u16 sectors;
u8 nr_blocks;
u8 nr_redundant;
unsigned alive:1; /* does a corresponding key exist in stripes btree? */
u16 block_sectors[BCH_BKEY_PTRS_MAX];
struct bch_extent_ptr ptrs[BCH_BKEY_PTRS_MAX];

View File

@ -1238,8 +1238,7 @@ use_clean:
*/
bch_verbose(c, "writing allocation info");
err = "error writing out alloc info";
ret = bch2_stripes_write(c, BTREE_INSERT_LAZY_RW) ?:
bch2_alloc_write(c, BTREE_INSERT_LAZY_RW);
ret = bch2_alloc_write(c, BTREE_INSERT_LAZY_RW);
if (ret) {
bch_err(c, "error writing alloc info");
goto err;