bcachefs: Fix bch2_btree_node_fill() for !path

We shouldn't be doing the unlock/relock dance when we're not using a
path - this fixes an assertion pop when called from btree node scan.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2024-04-12 15:54:33 -04:00
parent 8cf2036e7b
commit e879389f57
1 changed file with 19 additions and 27 deletions

View File

@@ -709,7 +709,6 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache; struct btree_cache *bc = &c->btree_cache;
struct btree *b; struct btree *b;
u32 seq;
if (unlikely(level >= BTREE_MAX_DEPTH)) { if (unlikely(level >= BTREE_MAX_DEPTH)) {
int ret = bch2_fs_topology_error(c, "attempting to get btree node at level %u, >= max depth %u", int ret = bch2_fs_topology_error(c, "attempting to get btree node at level %u, >= max depth %u",
@@ -775,34 +774,26 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
} }
set_btree_node_read_in_flight(b); set_btree_node_read_in_flight(b);
six_unlock_write(&b->c.lock); six_unlock_write(&b->c.lock);
seq = six_lock_seq(&b->c.lock);
six_unlock_intent(&b->c.lock);
/* Unlock before doing IO: */
if (path && sync)
bch2_trans_unlock_noassert(trans);
bch2_btree_node_read(trans, b, sync);
if (!sync)
return NULL;
if (path) { if (path) {
int ret = bch2_trans_relock(trans) ?: u32 seq = six_lock_seq(&b->c.lock);
bch2_btree_path_relock_intent(trans, path);
if (ret) {
BUG_ON(!trans->restarted);
return ERR_PTR(ret);
}
}
if (!six_relock_type(&b->c.lock, lock_type, seq)) { /* Unlock before doing IO: */
BUG_ON(!path); six_unlock_intent(&b->c.lock);
bch2_trans_unlock_noassert(trans);
trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path); bch2_btree_node_read(trans, b, sync);
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
if (!sync)
return NULL;
if (!six_relock_type(&b->c.lock, lock_type, seq))
b = NULL;
} else {
bch2_btree_node_read(trans, b, sync);
if (lock_type == SIX_LOCK_read)
six_lock_downgrade(&b->c.lock);
} }
return b; return b;
@@ -1135,18 +1126,19 @@ int bch2_btree_node_prefetch(struct btree_trans *trans,
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct btree_cache *bc = &c->btree_cache; struct btree_cache *bc = &c->btree_cache;
struct btree *b;
BUG_ON(path && !btree_node_locked(path, level + 1)); BUG_ON(path && !btree_node_locked(path, level + 1));
BUG_ON(level >= BTREE_MAX_DEPTH); BUG_ON(level >= BTREE_MAX_DEPTH);
b = btree_cache_find(bc, k); struct btree *b = btree_cache_find(bc, k);
if (b) if (b)
return 0; return 0;
b = bch2_btree_node_fill(trans, path, k, btree_id, b = bch2_btree_node_fill(trans, path, k, btree_id,
level, SIX_LOCK_read, false); level, SIX_LOCK_read, false);
return PTR_ERR_OR_ZERO(b); if (!IS_ERR_OR_NULL(b))
six_unlock_read(&b->c.lock);
return bch2_trans_relock(trans) ?: PTR_ERR_OR_ZERO(b);
} }
void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k) void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)