bcachefs: More assertions for nocow locking

- assert in shutdown path that no nocow locks are held
- check for overflow when taking nocow locks (sketched below)

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2023-09-24 16:25:06 -04:00
parent efedfc2ece
commit 1e3b40980b
3 changed files with 31 additions and 8 deletions
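
For the overflow check (second bullet): a nocow bucket lock is a signed reference count, taken and released in steps of lock_val = ±1, with sign(v) distinguishing the two lock types — the reworked to_text output below labels negative counts "copy" and positive counts "update". The test added at take_lock refuses the lock whenever adding lock_val would flip the sign of a nonzero count, i.e. the counter is about to wrap. Below is a minimal standalone sketch of just that predicate, not kernel code: sign() copies the #define in nocow_locking.c, while nocow_lock_would_overflow() and the demo in main() are hypothetical names for illustration. Build with -fwrapv (the kernel gets the same effect from -fno-strict-overflow) so the deliberate signed wrap in the INT_MAX/INT_MIN cases is well defined.

#include <assert.h>
#include <limits.h>
#include <stdbool.h>

/* Same definition as the sign() macro in nocow_locking.c. */
static int sign(int v)
{
        return v < 0 ? -1 : v > 0 ? 1 : 0;
}

/*
 * The check added at take_lock: taking one more reference of type lock_val
 * (+1 for update, -1 for copy) must not change the sign of a nonzero count.
 */
static bool nocow_lock_would_overflow(int v, int lock_val)
{
        return v && sign(v + lock_val) != sign(v);
}

int main(void)
{
        assert(!nocow_lock_would_overflow(0, 1));        /* unlocked bucket: fine */
        assert(!nocow_lock_would_overflow(3, 1));        /* another update lock: fine */
        assert(!nocow_lock_would_overflow(-2, -1));      /* another copy lock: fine */
        assert(nocow_lock_would_overflow(INT_MAX, 1));   /* would wrap negative */
        assert(nocow_lock_would_overflow(INT_MIN, -1));  /* would wrap positive */
        return 0;
}

Without the check, one more update lock at INT_MAX would wrap the count negative and masquerade as a pile of copy locks; with it, the trylock simply fails instead.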

fs/bcachefs/nocow_locking.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include "bcachefs.h"
+#include "bkey_methods.h"
 #include "nocow_locking.h"
 #include "util.h"
 
@@ -29,9 +30,10 @@ void bch2_bucket_nocow_unlock(struct bucket_nocow_lock_table *t, struct bpos buc
 
         for (i = 0; i < ARRAY_SIZE(l->b); i++)
                 if (l->b[i] == dev_bucket) {
-                        BUG_ON(sign(atomic_read(&l->l[i])) != lock_val);
+                        int v = atomic_sub_return(lock_val, &l->l[i]);
 
-                        if (!atomic_sub_return(lock_val, &l->l[i]))
+                        BUG_ON(v && sign(v) != lock_val);
+                        if (!v)
                                 closure_wake_up(&l->wait);
                         return;
                 }
@@ -64,6 +66,11 @@ got_entry:
         if (lock_val > 0 ? v < 0 : v > 0)
                 goto fail;
 take_lock:
+        v = atomic_read(&l->l[i]);
+        /* Overflow? */
+        if (v && sign(v + lock_val) != sign(v))
+                goto fail;
+
         atomic_add(lock_val, &l->l[i]);
         spin_unlock(&l->lock);
         return true;
@@ -83,6 +90,7 @@ void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
 }
 
 void bch2_nocow_locks_to_text(struct printbuf *out, struct bucket_nocow_lock_table *t)
+
 {
         unsigned i, nr_zero = 0;
         struct nocow_lock_bucket *l;
@@ -102,9 +110,13 @@ void bch2_nocow_locks_to_text(struct printbuf *out, struct bucket_nocow_lock_tab
                         prt_printf(out, "(%u empty entries)\n", nr_zero);
                 nr_zero = 0;
 
-                for (i = 0; i < ARRAY_SIZE(l->l); i++)
-                        if (atomic_read(&l->l[i]))
-                                prt_printf(out, "%llu: %i ", l->b[i], atomic_read(&l->l[i]));
+                for (i = 0; i < ARRAY_SIZE(l->l); i++) {
+                        int v = atomic_read(&l->l[i]);
+                        if (v) {
+                                bch2_bpos_to_text(out, u64_to_bucket(l->b[i]));
+                                prt_printf(out, ": %s %u ", v < 0 ? "copy" : "update", abs(v));
+                        }
+                }
                 prt_newline(out);
         }
 
@@ -112,12 +124,21 @@ void bch2_nocow_locks_to_text(struct printbuf *out, struct bucket_nocow_lock_tab
                 prt_printf(out, "(%u empty entries)\n", nr_zero);
 }
 
+void bch2_fs_nocow_locking_exit(struct bch_fs *c)
+{
+        struct bucket_nocow_lock_table *t = &c->nocow_locks;
+
+        for (struct nocow_lock_bucket *l = t->l; l < t->l + ARRAY_SIZE(t->l); l++)
+                for (unsigned j = 0; j < ARRAY_SIZE(l->l); j++)
+                        BUG_ON(atomic_read(&l->l[j]));
+}
+
 int bch2_fs_nocow_locking_init(struct bch_fs *c)
 {
-        unsigned i;
+        struct bucket_nocow_lock_table *t = &c->nocow_locks;
 
-        for (i = 0; i < ARRAY_SIZE(c->nocow_locks.l); i++)
-                spin_lock_init(&c->nocow_locks.l[i].lock);
+        for (struct nocow_lock_bucket *l = t->l; l < t->l + ARRAY_SIZE(t->l); l++)
+                spin_lock_init(&l->lock);
 
         return 0;
 }
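
On the first bullet: bch2_fs_nocow_locking_exit() walks every entry of the lock table at shutdown and BUG()s on any nonzero count, so a leaked nocow lock now fails loudly instead of leaving a bucket silently stuck. The fragment below is only a sketch of the pairing discipline this enforces, not a real call site: example_nocow_overwrite() is hypothetical, and it assumes the bch2_bucket_nocow_lock()/bch2_bucket_nocow_unlock() wrappers and the BUCKET_NOCOW_LOCK_UPDATE flag from nocow_locking.h, with all error handling elided.

/*
 * Every bch2_bucket_nocow_lock() must be paired with a
 * bch2_bucket_nocow_unlock() before the filesystem is torn down; a path
 * that returns early without the unlock leaves the bucket's counter
 * nonzero, and __bch2_fs_free() will now trip
 * BUG_ON(atomic_read(&l->l[j])) in bch2_fs_nocow_locking_exit().
 */
static void example_nocow_overwrite(struct bch_fs *c, struct bpos bucket)
{
        bch2_bucket_nocow_lock(&c->nocow_locks, bucket, BUCKET_NOCOW_LOCK_UPDATE);

        /* ... overwrite the existing extent in place ... */

        bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, BUCKET_NOCOW_LOCK_UPDATE);
}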

fs/bcachefs/nocow_locking.h

@@ -44,6 +44,7 @@ static inline bool bch2_bucket_nocow_trylock(struct bucket_nocow_lock_table *t,
 
 void bch2_nocow_locks_to_text(struct printbuf *, struct bucket_nocow_lock_table *);
 
+void bch2_fs_nocow_locking_exit(struct bch_fs *);
 int bch2_fs_nocow_locking_init(struct bch_fs *);
 
 #endif /* _BCACHEFS_NOCOW_LOCKING_H */

fs/bcachefs/super.c

@@ -484,6 +484,7 @@ static void __bch2_fs_free(struct bch_fs *c)
         bch2_fs_fsio_exit(c);
         bch2_fs_ec_exit(c);
         bch2_fs_encryption_exit(c);
+        bch2_fs_nocow_locking_exit(c);
         bch2_fs_io_write_exit(c);
         bch2_fs_io_read_exit(c);
         bch2_fs_buckets_waiting_for_journal_exit(c);