bcachefs: for_each_member_device_rcu() now declares loop iter

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2023-12-17 02:34:05 -05:00
parent 9fea2274f7
commit 41b84fb489
10 changed files with 53 additions and 81 deletions
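
In short: the macro now declares its own loop variable, so callers drop their separate "struct bch_dev *ca;" / "unsigned i;" declarations, and code that needs the device index reads ca->dev_idx instead of the old iterator. A minimal before/after sketch, distilled from the bch2_reset_alloc_cursors() hunk below:

        /* Before: the caller declares both the device pointer and the index. */
        struct bch_dev *ca;
        unsigned i;

        rcu_read_lock();
        for_each_member_device_rcu(ca, c, i, NULL)
                ca->alloc_cursor = 0;
        rcu_read_unlock();

        /* After: the macro declares ca itself; the caller only names it. */
        rcu_read_lock();
        for_each_member_device_rcu(c, ca, NULL)
                ca->alloc_cursor = 0;
        rcu_read_unlock();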


@@ -891,7 +891,6 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos
 static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
 {
         struct bch_dev *ca;
-        unsigned iter;
 
         if (bch2_dev_bucket_exists(c, *bucket))
                 return true;
@@ -909,8 +908,7 @@ static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
         }
 
         rcu_read_lock();
-        iter = bucket->inode;
-        ca = __bch2_next_dev(c, &iter, NULL);
+        ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
         if (ca)
                 *bucket = POS(ca->dev_idx, ca->mi.first_bucket);
         rcu_read_unlock();


@@ -69,11 +69,8 @@ const char * const bch2_watermarks[] = {
 void bch2_reset_alloc_cursors(struct bch_fs *c)
 {
-        struct bch_dev *ca;
-        unsigned i;
-
         rcu_read_lock();
-        for_each_member_device_rcu(ca, c, i, NULL)
+        for_each_member_device_rcu(c, ca, NULL)
                 ca->alloc_cursor = 0;
         rcu_read_unlock();
 }


@@ -154,8 +154,7 @@ retry:
 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
 {
-        struct bch_dev *ca;
-        unsigned i, u64s = fs_usage_u64s(c);
+        unsigned u64s = fs_usage_u64s(c);
 
         BUG_ON(idx >= ARRAY_SIZE(c->usage));
@@ -167,7 +166,7 @@ void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
         percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
 
         rcu_read_lock();
-        for_each_member_device_rcu(ca, c, i, NULL) {
+        for_each_member_device_rcu(c, ca, NULL) {
                 u64s = dev_usage_u64s();
 
                 acc_u64s_percpu((u64 *) ca->usage_base,


@@ -89,19 +89,14 @@ err:
 void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c)
 {
-        struct bch_disk_groups_cpu *g;
-        struct bch_dev *ca;
-        int i;
-        unsigned iter;
-
         out->atomic++;
         rcu_read_lock();
 
-        g = rcu_dereference(c->disk_groups);
+        struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups);
         if (!g)
                 goto out;
 
-        for (i = 0; i < g->nr; i++) {
+        for (unsigned i = 0; i < g->nr; i++) {
                 if (i)
                         prt_printf(out, " ");
@@ -111,7 +106,7 @@ void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c)
                 }
 
                 prt_printf(out, "[parent %d devs", g->entries[i].parent);
-                for_each_member_device_rcu(ca, c, iter, &g->entries[i].devs)
+                for_each_member_device_rcu(c, ca, &g->entries[i].devs)
                         prt_printf(out, " %s", ca->name);
                 prt_printf(out, "]");
         }


@@ -1243,18 +1243,17 @@ static int unsigned_cmp(const void *_l, const void *_r)
 static unsigned pick_blocksize(struct bch_fs *c,
                                struct bch_devs_mask *devs)
 {
-        struct bch_dev *ca;
-        unsigned i, nr = 0, sizes[BCH_SB_MEMBERS_MAX];
+        unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX];
         struct {
                 unsigned nr, size;
         } cur = { 0, 0 }, best = { 0, 0 };
 
-        for_each_member_device_rcu(ca, c, i, devs)
+        for_each_member_device_rcu(c, ca, devs)
                 sizes[nr++] = ca->mi.bucket_size;
 
         sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);
 
-        for (i = 0; i < nr; i++) {
+        for (unsigned i = 0; i < nr; i++) {
                 if (sizes[i] != cur.size) {
                         if (cur.nr > best.nr)
                                 best = cur;
@@ -1337,8 +1336,6 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
                          enum bch_watermark watermark)
 {
         struct ec_stripe_head *h;
-        struct bch_dev *ca;
-        unsigned i;
 
         h = kzalloc(sizeof(*h), GFP_KERNEL);
         if (!h)
@@ -1355,13 +1352,13 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
         rcu_read_lock();
         h->devs = target_rw_devs(c, BCH_DATA_user, target);
 
-        for_each_member_device_rcu(ca, c, i, &h->devs)
+        for_each_member_device_rcu(c, ca, &h->devs)
                 if (!ca->mi.durability)
-                        __clear_bit(i, h->devs.d);
+                        __clear_bit(ca->dev_idx, h->devs.d);
 
         h->blocksize = pick_blocksize(c, &h->devs);
 
-        for_each_member_device_rcu(ca, c, i, &h->devs)
+        for_each_member_device_rcu(c, ca, &h->devs)
                 if (ca->mi.bucket_size == h->blocksize)
                         h->nr_active_devs++;


@@ -1294,11 +1294,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
 {
         struct bch_fs *c = container_of(j, struct bch_fs, journal);
         union journal_res_state s;
-        struct bch_dev *ca;
         unsigned long now = jiffies;
         u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;
-        u64 seq;
-        unsigned i;
 
         if (!out->nr_tabstops)
                 printbuf_tabstop_push(out, 24);
@@ -1343,10 +1340,10 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
         prt_newline(out);
 
-        for (seq = journal_cur_seq(j);
+        for (u64 seq = journal_cur_seq(j);
              seq >= journal_last_unwritten_seq(j);
              --seq) {
-                i = seq & JOURNAL_BUF_MASK;
+                unsigned i = seq & JOURNAL_BUF_MASK;
 
                 prt_printf(out, "unwritten entry:");
                 prt_tab(out);
@@ -1390,8 +1387,7 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
            j->space[journal_space_total].next_entry,
            j->space[journal_space_total].total);
 
-        for_each_member_device_rcu(ca, c, i,
-                                   &c->rw_devs[BCH_DATA_journal]) {
+        for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
                 struct journal_device *ja = &ca->journal;
 
                 if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
@@ -1400,7 +1396,7 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
                 if (!ja->nr)
                         continue;
 
-                prt_printf(out, "dev %u:\n", i);
+                prt_printf(out, "dev %u:\n", ca->dev_idx);
                 prt_printf(out, "\tnr\t\t%u\n", ja->nr);
                 prt_printf(out, "\tbucket size\t%u\n", ca->mi.bucket_size);
                 prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);


@@ -136,15 +136,13 @@ static struct journal_space __journal_space_available(struct journal *j, unsigne
                             enum journal_space_from from)
 {
         struct bch_fs *c = container_of(j, struct bch_fs, journal);
-        struct bch_dev *ca;
-        unsigned i, pos, nr_devs = 0;
+        unsigned pos, nr_devs = 0;
         struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];
 
         BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));
 
         rcu_read_lock();
-        for_each_member_device_rcu(ca, c, i,
-                                   &c->rw_devs[BCH_DATA_journal]) {
+        for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
                 if (!ca->journal.nr)
                         continue;
@@ -173,19 +171,17 @@ static struct journal_space __journal_space_available(struct journal *j, unsigne
 void bch2_journal_space_available(struct journal *j)
 {
         struct bch_fs *c = container_of(j, struct bch_fs, journal);
-        struct bch_dev *ca;
         unsigned clean, clean_ondisk, total;
         unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
                                       j->buf[1].buf_size >> 9);
-        unsigned i, nr_online = 0, nr_devs_want;
+        unsigned nr_online = 0, nr_devs_want;
         bool can_discard = false;
         int ret = 0;
 
         lockdep_assert_held(&j->lock);
 
         rcu_read_lock();
-        for_each_member_device_rcu(ca, c, i,
-                                   &c->rw_devs[BCH_DATA_journal]) {
+        for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
                 struct journal_device *ja = &ca->journal;
 
                 if (!ja->nr)
@@ -216,7 +212,7 @@ void bch2_journal_space_available(struct journal *j)
         nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);
 
-        for (i = 0; i < journal_space_nr; i++)
+        for (unsigned i = 0; i < journal_space_nr; i++)
                 j->space[i] = __journal_space_available(j, nr_devs_want, i);
 
         clean_ondisk = j->space[journal_space_clean_ondisk].total;


@@ -358,14 +358,12 @@ const struct bch_sb_field_ops bch_sb_field_ops_members_v2 = {
 void bch2_sb_members_from_cpu(struct bch_fs *c)
 {
         struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
-        struct bch_dev *ca;
-        unsigned i, e;
 
         rcu_read_lock();
-        for_each_member_device_rcu(ca, c, i, NULL) {
-                struct bch_member *m = __bch2_members_v2_get_mut(mi, i);
+        for_each_member_device_rcu(c, ca, NULL) {
+                struct bch_member *m = __bch2_members_v2_get_mut(mi, ca->dev_idx);
 
-                for (e = 0; e < BCH_MEMBER_ERROR_NR; e++)
+                for (unsigned e = 0; e < BCH_MEMBER_ERROR_NR; e++)
                         m->errors[e] = cpu_to_le64(atomic64_read(&ca->errors[e]));
         }
         rcu_read_unlock();


@@ -79,33 +79,38 @@ static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
         return (struct bch_devs_list) { .nr = 1, .data[0] = dev };
 }
 
-static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter,
-                                              const struct bch_devs_mask *mask)
+static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx,
+                                                  const struct bch_devs_mask *mask)
 {
         struct bch_dev *ca = NULL;
 
-        while ((*iter = mask
-                ? find_next_bit(mask->d, c->sb.nr_devices, *iter)
-                : *iter) < c->sb.nr_devices &&
-               !(ca = rcu_dereference_check(c->devs[*iter],
+        while ((idx = mask
+                ? find_next_bit(mask->d, c->sb.nr_devices, idx)
+                : idx) < c->sb.nr_devices &&
+               !(ca = rcu_dereference_check(c->devs[idx],
                                             lockdep_is_held(&c->state_lock))))
-                (*iter)++;
+                idx++;
 
         return ca;
 }
 
-#define for_each_member_device_rcu(ca, c, iter, mask)                  \
-        for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++)
+static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca,
+                                              const struct bch_devs_mask *mask)
+{
+        return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask);
+}
+
+#define for_each_member_device_rcu(_c, _ca, _mask)                     \
+        for (struct bch_dev *_ca = NULL;                               \
+             (_ca = __bch2_next_dev((_c), _ca, (_mask)));)
 
 static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
 {
-        unsigned idx = ca ? ca->dev_idx + 1 : 0;
-
         if (ca)
                 percpu_ref_put(&ca->ref);
 
         rcu_read_lock();
-        if ((ca = __bch2_next_dev(c, &idx, NULL)))
+        if ((ca = __bch2_next_dev(c, ca, NULL)))
                 percpu_ref_get(&ca->ref);
         rcu_read_unlock();
@@ -126,16 +131,14 @@ static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
                                                        struct bch_dev *ca,
                                                        unsigned state_mask)
 {
-        unsigned idx = ca ? ca->dev_idx + 1 : 0;
-
         if (ca)
                 percpu_ref_put(&ca->io_ref);
 
         rcu_read_lock();
-        while ((ca = __bch2_next_dev(c, &idx, NULL)) &&
+        while ((ca = __bch2_next_dev(c, ca, NULL)) &&
                (!((1 << ca->mi.state) & state_mask) ||
                 !percpu_ref_tryget(&ca->io_ref)))
-                idx++;
+                ;
         rcu_read_unlock();
 
         return ca;
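
For reference, the rewritten macro leans on a loop-scoped C99 declaration: _ca starts out NULL, and __bch2_next_dev() resumes the scan at ca->dev_idx + 1, returning NULL once the device array (or mask) is exhausted. A call such as for_each_member_device_rcu(c, ca, NULL) therefore expands to roughly the following (hand-expanded illustration, not part of the diff; the counting body mirrors the bch2_dev_usage_journal_reserve() hunk below):

        unsigned nr = 0;

        rcu_read_lock();
        for (struct bch_dev *ca = NULL;
             (ca = __bch2_next_dev(c, ca, NULL));)
                /* ca is scoped to the loop; ca->dev_idx replaces the old iterator */
                nr++;
        rcu_read_unlock();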


@@ -167,14 +167,12 @@ static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
 struct bch_fs *bch2_dev_to_fs(dev_t dev)
 {
         struct bch_fs *c;
-        struct bch_dev *ca;
-        unsigned i;
 
         mutex_lock(&bch_fs_list_lock);
         rcu_read_lock();
 
         list_for_each_entry(c, &bch_fs_list, list)
-                for_each_member_device_rcu(ca, c, i, NULL)
+                for_each_member_device_rcu(c, ca, NULL)
                         if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
                                 closure_get(&c->cl);
                                 goto found;
@@ -215,14 +213,13 @@ struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
 static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
 {
-        struct bch_dev *ca;
-        unsigned i, nr = 0, u64s =
+        unsigned nr = 0, u64s =
                 ((sizeof(struct jset_entry_dev_usage) +
                   sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR)) /
                 sizeof(u64);
 
         rcu_read_lock();
-        for_each_member_device_rcu(ca, c, i, NULL)
+        for_each_member_device_rcu(c, ca, NULL)
                 nr++;
         rcu_read_unlock();
@@ -1906,18 +1903,14 @@ err:
 /* return with ref on ca->ref: */
 struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
 {
-        struct bch_dev *ca;
-        unsigned i;
-
         rcu_read_lock();
-        for_each_member_device_rcu(ca, c, i, NULL)
-                if (!strcmp(name, ca->name))
-                        goto found;
-        ca = ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
-found:
+        for_each_member_device_rcu(c, ca, NULL)
+                if (!strcmp(name, ca->name)) {
+                        rcu_read_unlock();
+                        return ca;
+                }
         rcu_read_unlock();
-        return ca;
+        return ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
 }
 
 /* Filesystem open: */