diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index dc3aa70a0380..e6fe2a987574 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -112,10 +112,8 @@ static noinline void lock_graph_pop_all(struct lock_graph *g)
 		lock_graph_up(g);
 }
 
-static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
 {
-	closure_get(&trans->ref);
-
 	g->g[g->nr++] = (struct trans_waiting_for_lock) {
 		.trans		= trans,
 		.node_want	= trans->locking,
@@ -123,6 +121,12 @@ static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
 	};
 }
 
+static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+{
+	closure_get(&trans->ref);
+	__lock_graph_down(g, trans);
+}
+
 static bool lock_graph_remove_non_waiters(struct lock_graph *g)
 {
 	struct trans_waiting_for_lock *i;
@@ -223,10 +227,14 @@ static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
 	struct trans_waiting_for_lock *i;
 
 	for (i = g->g; i < g->g + g->nr; i++)
-		if (i->trans == trans)
+		if (i->trans == trans) {
+			closure_put(&trans->ref);
 			return break_cycle(g, cycle);
+		}
 
 	if (g->nr == ARRAY_SIZE(g->g)) {
+		closure_put(&trans->ref);
+
 		if (orig_trans->lock_may_not_fail)
 			return 0;
 
@@ -240,7 +248,7 @@ static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
 		return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
 	}
 
-	lock_graph_down(g, trans);
+	__lock_graph_down(g, trans);
 	return 0;
 }
 
@@ -335,9 +343,10 @@ int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
 			    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
 				continue;
 
-			ret = lock_graph_descend(&g, trans, cycle);
+			closure_get(&trans->ref);
 			raw_spin_unlock(&b->lock.wait_lock);
 
+			ret = lock_graph_descend(&g, trans, cycle);
 			if (ret)
 				return ret;
 			goto next;