bcachefs: Fix erasure coding locking

This adds a new helper, bch2_trans_mutex_lock(), for locking a mutex -
dropping and retaking btree locks as needed.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Author: Kent Overstreet, 2023-02-17 22:43:47 -05:00
commit 73d86dfd88 (parent af0ee5bcf3)
4 changed files with 34 additions and 17 deletions
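
For context before the diffs: blocking on a mutex while holding btree node locks can deadlock against a thread that already holds the mutex and is waiting on those btree locks. The new helper drops btree locks before it blocks and retakes them afterwards, turning a failed retake into a transaction restart. The caller-side contract, as a rough sketch (the comments are mine, not from the diff):

	ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
	if (ret)
		return ret;	/* relock failed: the mutex has already been
				 * released; propagate the restart error */

	/* here both the mutex and the btree locks are held */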


@@ -1073,7 +1073,7 @@ static bool try_decrease_writepoints(struct bch_fs *c,
 	return true;
 }
 
-static void bch2_trans_mutex_lock(struct btree_trans *trans,
+static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
 				  struct mutex *lock)
 {
 	if (!mutex_trylock(lock)) {
@@ -1091,7 +1091,7 @@ static struct write_point *writepoint_find(struct btree_trans *trans,
 	if (!(write_point & 1UL)) {
 		wp = (struct write_point *) write_point;
-		bch2_trans_mutex_lock(trans, &wp->lock);
+		bch2_trans_mutex_lock_norelock(trans, &wp->lock);
 		return wp;
 	}
@@ -1100,7 +1100,7 @@ restart_find:
 	wp = __writepoint_find(head, write_point);
 	if (wp) {
 lock_wp:
-		bch2_trans_mutex_lock(trans, &wp->lock);
+		bch2_trans_mutex_lock_norelock(trans, &wp->lock);
 		if (wp->write_point == write_point)
 			goto out;
 		mutex_unlock(&wp->lock);
@@ -1113,8 +1113,8 @@ restart_find_oldest:
 		if (!oldest || time_before64(wp->last_used, oldest->last_used))
 			oldest = wp;
 
-	bch2_trans_mutex_lock(trans, &oldest->lock);
-	bch2_trans_mutex_lock(trans, &c->write_points_hash_lock);
+	bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
+	bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
 	if (oldest >= c->write_points + c->write_points_nr ||
 	    try_increase_writepoints(c)) {
 		mutex_unlock(&c->write_points_hash_lock);
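
Note the rename above: this file's pre-existing static helper becomes bch2_trans_mutex_lock_norelock() so it no longer collides with the new global helper of the old name. Unlike the global version it does not retake btree locks, so it cannot fail, but it leaves the transaction unlocked whenever the trylock misses. Its body, sketched from the first hunk (which cuts off mid-function):

	static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
						   struct mutex *lock)
	{
		if (!mutex_trylock(lock)) {
			bch2_trans_unlock(trans);	/* drop btree locks */
			mutex_lock(lock);		/* now safe to block */
		}
		/* btree locks are NOT retaken here */
	}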


@@ -197,6 +197,15 @@ struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
 void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);
 
+int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);
+
+static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
+{
+	return mutex_trylock(lock)
+		? 0
+		: __bch2_trans_mutex_lock(trans, lock);
+}
+
 #ifdef CONFIG_BCACHEFS_DEBUG
 void bch2_trans_verify_paths(struct btree_trans *);
 void bch2_assert_pos_locked(struct btree_trans *, enum btree_id,
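
Since the wrapper can only fail via its slow path, a nonzero return always means btree locks were dropped and could not be retaken; ignoring it would leave the transaction silently unlocked. A hypothetical call site (do_thing() is a made-up stand-in):

	/* Wrong: discards a possible transaction restart, and may call
	 * do_thing() with btree locks no longer held: */
	bch2_trans_mutex_lock(trans, lock);
	do_thing(trans);

	/* Right: */
	ret = bch2_trans_mutex_lock(trans, lock);
	if (ret)
		return ret;
	do_thing(trans);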


@@ -725,6 +725,19 @@ bool bch2_trans_locked(struct btree_trans *trans)
 	return false;
 }
 
+int __bch2_trans_mutex_lock(struct btree_trans *trans,
+			    struct mutex *lock)
+{
+	int ret;
+
+	bch2_trans_unlock(trans);
+	mutex_lock(lock);
+	ret = bch2_trans_relock(trans);
+	if (ret)
+		mutex_unlock(lock);
+	return ret;
+}
+
 /* Debug */
 
 #ifdef CONFIG_BCACHEFS_DEBUG
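
The ordering in the slow path is the heart of the fix: btree locks are dropped before blocking on the mutex, so a mutex holder that is waiting on those btree locks can make progress, and on a failed relock the mutex is released before returning, so the restart path needs no unwinding by the caller. A hypothetical retry loop relying on that guarantee (do_work() is a stand-in; bch2_trans_begin() and bch2_err_matches() are existing bcachefs interfaces):

	do {
		bch2_trans_begin(trans);

		ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
		if (ret)
			continue;	/* nothing held; just retry */

		ret = do_work(trans);
		mutex_unlock(&c->ec_stripe_head_lock);
	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));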


@@ -1231,7 +1231,7 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
 		return NULL;
 
 	mutex_init(&h->lock);
-	mutex_lock(&h->lock);
+	BUG_ON(!mutex_trylock(&h->lock));
 
 	h->target	= target;
 	h->algo		= algo;
@@ -1280,23 +1280,18 @@ struct ec_stripe_head *__bch2_ec_stripe_head_get(struct btree_trans *trans,
 	if (!redundancy)
 		return NULL;
 
-	if (!mutex_trylock(&c->ec_stripe_head_lock)) {
-		bch2_trans_unlock(trans);
-		mutex_lock(&c->ec_stripe_head_lock);
-		ret = bch2_trans_relock(trans);
-		if (ret) {
-			mutex_unlock(&c->ec_stripe_head_lock);
-			return ERR_PTR(ret);
-		}
-	}
+	ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
+	if (ret)
+		return ERR_PTR(ret);
 
 	list_for_each_entry(h, &c->ec_stripe_head_list, list)
 		if (h->target		== target &&
 		    h->algo		== algo &&
 		    h->redundancy	== redundancy &&
 		    h->copygc		== copygc) {
-			mutex_lock(&h->lock);
+			ret = bch2_trans_mutex_lock(trans, &h->lock);
+			if (ret)
+				h = ERR_PTR(ret);
 			goto found;
 		}
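
Two details in this last file: ec_new_stripe_head_alloc() switches to BUG_ON(!mutex_trylock(&h->lock)) because h was freshly allocated and mutex_init()ed a line earlier, so the lock cannot be contended and the trylock documents that this path never blocks. In the lookup loop, a failed bch2_trans_mutex_lock() is propagated through the head pointer itself, so the existing found: label serves both outcomes and callers now check for an ERR_PTR; roughly (a sketch, the real exit path may do more than this):

	found:
		mutex_unlock(&c->ec_stripe_head_lock);
		return h;	/* a stripe head, or ERR_PTR(ret) from a
				 * failed btree relock */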