bcachefs: bucket_lock() is now a sleepable lock

fsck_err() may sleep - it takes a mutex and may allocate memory, so
bucket_lock() needs to be a sleepable lock.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2023-09-27 19:51:29 -04:00
parent 3c40841cdc
commit 73bbeaa2de
2 changed files with 5 additions and 4 deletions

View File

@ -367,7 +367,6 @@ static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
struct printbuf buf = PRINTBUF;
percpu_down_read(&c->mark_lock);
buf.atomic++;
idx = bch2_replicas_entry_idx(c, r);
if (idx < 0 &&
@ -795,7 +794,6 @@ static int mark_stripe_bucket(struct btree_trans *trans,
/* * XXX doesn't handle deletion */
percpu_down_read(&c->mark_lock);
buf.atomic++;
g = PTR_GC_BUCKET(ca, ptr);
if (g->dirty_sectors ||

View File

@ -70,12 +70,15 @@ union ulong_byte_assert {
/*
 * Release a bucket lock taken by bucket_lock().
 *
 * The lock is a single bit in b->lock, acquired via a sleepable bit-wait
 * (wait_on_bit_lock), so release must clear the bit with release semantics
 * and then wake any tasks sleeping on it.  The BUILD_BUG_ON verifies that
 * BUCKET_LOCK_BITNR falls within the low byte of the ulong, which the
 * union-based byte view depends on.
 */
static inline void bucket_unlock(struct bucket *b)
{
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	/* clear_bit_unlock() provides the release barrier; then wake waiters */
	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}
/*
 * Acquire the per-bucket lock.
 *
 * May sleep: lock holders can call fsck_err(), which takes a mutex and may
 * allocate memory, so this must be a sleepable bit-wait lock rather than a
 * bit_spin_lock.  Waits in TASK_UNINTERRUPTIBLE until the lock bit is
 * clear, then sets it atomically.  Pair with bucket_unlock().
 */
static inline void bucket_lock(struct bucket *b)
{
	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}
static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)