bcachefs: bch_fs_usage_base

Split out base filesystem usage into its own type; prep work for
breaking up bch2_trans_fs_usage_apply().

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2023-11-17 00:03:45 -05:00
parent 4f564f4f9f
commit 8e7834a883
7 changed files with 45 additions and 47 deletions
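The shape of the change, in brief: the six core counters are split out of struct bch_fs_usage into a new struct bch_fs_usage_base, embedded as the member b (hence the mechanical s/usage->reserved/usage->b.reserved/ edits below), and struct btree_trans gains a bch_fs_usage_base so a transaction can carry base-usage deltas as a unit. A condensed sketch of the resulting layout, assembled from the buckets_types.h hunk below:

struct bch_fs_usage_base {
        u64     hidden;         /* sb + journal buckets, per the initialize hunk */
        u64     btree;
        u64     data;
        u64     cached;
        u64     reserved;
        u64     nr_inodes;
};

struct bch_fs_usage {
        /* all fields are in units of 512 byte sectors: */
        struct bch_fs_usage_base b;
        u64     persistent_reserved[BCH_REPLICAS_MAX];
        u64     replicas[];
};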

fs/bcachefs/btree_gc.c

@@ -1253,19 +1253,19 @@ static int bch2_gc_done(struct bch_fs *c,
         bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);

         copy_fs_field(fs_usage_hidden_wrong,
-                      hidden,           "hidden");
+                      b.hidden,         "hidden");
         copy_fs_field(fs_usage_btree_wrong,
-                      btree,            "btree");
+                      b.btree,          "btree");

         if (!metadata_only) {
                 copy_fs_field(fs_usage_data_wrong,
-                              data,     "data");
+                              b.data,   "data");
                 copy_fs_field(fs_usage_cached_wrong,
-                              cached,   "cached");
+                              b.cached, "cached");
                 copy_fs_field(fs_usage_reserved_wrong,
-                              reserved, "reserved");
+                              b.reserved,       "reserved");
                 copy_fs_field(fs_usage_nr_inodes_wrong,
-                              nr_inodes,"nr_inodes");
+                              b.nr_inodes,"nr_inodes");

                 for (i = 0; i < BCH_REPLICAS_MAX; i++)
                         copy_fs_field(fs_usage_persistent_reserved_wrong,

fs/bcachefs/btree_types.h

@@ -430,6 +430,9 @@ struct btree_trans {
         struct journal_res      journal_res;
         u64                     *journal_seq;
         struct disk_reservation *disk_res;
+
+        struct bch_fs_usage_base fs_usage_delta;
+
         unsigned                journal_u64s;
         unsigned                extra_disk_res; /* XXX kill */
         struct replicas_delta_list *fs_usage_deltas;
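The new trans->fs_usage_delta member is added but not yet written to by this patch. As a hedged sketch of the direction the commit message points at (hypothetical code, not part of this commit; the actual breakup of bch2_trans_fs_usage_apply() comes later), a transaction-local helper could accumulate into the delta and let commit apply it in one place, mirroring what fs_usage_data_type_to_base() does for a bch_fs_usage_base:

/* Hypothetical illustration only -- not from this commit: */
static inline void trans_account_sectors(struct btree_trans *trans,
                                         enum bch_data_type data_type,
                                         s64 sectors)
{
        struct bch_fs_usage_base *d = &trans->fs_usage_delta;

        switch (data_type) {
        case BCH_DATA_btree:
                d->btree += sectors;
                break;
        case BCH_DATA_user:
        case BCH_DATA_parity:
                d->data += sectors;
                break;
        case BCH_DATA_cached:
                d->cached += sectors;
                break;
        default:
                break;
        }
}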

fs/bcachefs/buckets.c

@@ -25,7 +25,7 @@
 #include <linux/preempt.h>

-static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
+static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage,
                                               enum bch_data_type data_type,
                                               s64 sectors)
 {
@@ -54,20 +54,20 @@ void bch2_fs_usage_initialize(struct bch_fs *c)
                 bch2_fs_usage_acc_to_base(c, i);

         for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++)
-                usage->reserved += usage->persistent_reserved[i];
+                usage->b.reserved += usage->persistent_reserved[i];

         for (unsigned i = 0; i < c->replicas.nr; i++) {
                 struct bch_replicas_entry_v1 *e =
                         cpu_replicas_entry(&c->replicas, i);

-                fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
+                fs_usage_data_type_to_base(&usage->b, e->data_type, usage->replicas[i]);
         }

         for_each_member_device(c, ca) {
                 struct bch_dev_usage dev = bch2_dev_usage_read(ca);

-                usage->hidden += (dev.d[BCH_DATA_sb].buckets +
-                                  dev.d[BCH_DATA_journal].buckets) *
+                usage->b.hidden += (dev.d[BCH_DATA_sb].buckets +
+                                    dev.d[BCH_DATA_journal].buckets) *
                         ca->mi.bucket_size;
         }
@@ -188,15 +188,15 @@ void bch2_fs_usage_to_text(struct printbuf *out,
         prt_printf(out, "capacity:\t\t\t%llu\n", c->capacity);

         prt_printf(out, "hidden:\t\t\t\t%llu\n",
-                   fs_usage->u.hidden);
+                   fs_usage->u.b.hidden);
         prt_printf(out, "data:\t\t\t\t%llu\n",
-                   fs_usage->u.data);
+                   fs_usage->u.b.data);
         prt_printf(out, "cached:\t\t\t\t%llu\n",
-                   fs_usage->u.cached);
+                   fs_usage->u.b.cached);
         prt_printf(out, "reserved:\t\t\t%llu\n",
-                   fs_usage->u.reserved);
+                   fs_usage->u.b.reserved);
         prt_printf(out, "nr_inodes:\t\t\t%llu\n",
-                   fs_usage->u.nr_inodes);
+                   fs_usage->u.b.nr_inodes);
         prt_printf(out, "online reserved:\t\t%llu\n",
                    fs_usage->online_reserved);
@@ -225,10 +225,10 @@ static u64 reserve_factor(u64 r)

 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
 {
-        return min(fs_usage->u.hidden +
-                   fs_usage->u.btree +
-                   fs_usage->u.data +
-                   reserve_factor(fs_usage->u.reserved +
+        return min(fs_usage->u.b.hidden +
+                   fs_usage->u.b.btree +
+                   fs_usage->u.b.data +
+                   reserve_factor(fs_usage->u.b.reserved +
                                   fs_usage->online_reserved),
                    c->capacity);
 }
@@ -240,17 +240,17 @@ __bch2_fs_usage_read_short(struct bch_fs *c)
         u64 data, reserved;

         ret.capacity = c->capacity -
-                bch2_fs_usage_read_one(c, &c->usage_base->hidden);
+                bch2_fs_usage_read_one(c, &c->usage_base->b.hidden);

-        data            = bch2_fs_usage_read_one(c, &c->usage_base->data) +
-                          bch2_fs_usage_read_one(c, &c->usage_base->btree);
-        reserved        = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
+        data            = bch2_fs_usage_read_one(c, &c->usage_base->b.data) +
+                          bch2_fs_usage_read_one(c, &c->usage_base->b.btree);
+        reserved        = bch2_fs_usage_read_one(c, &c->usage_base->b.reserved) +
                           percpu_u64_get(c->online_reserved);

         ret.used        = min(ret.capacity, data + reserve_factor(reserved));
         ret.free        = ret.capacity - ret.used;

-        ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
+        ret.nr_inodes   = bch2_fs_usage_read_one(c, &c->usage_base->b.nr_inodes);

         return ret;
 }
@@ -308,9 +308,9 @@ void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
         fs_usage = fs_usage_ptr(c, journal_seq, gc);

         if (data_type_is_hidden(old->data_type))
-                fs_usage->hidden -= ca->mi.bucket_size;
+                fs_usage->b.hidden -= ca->mi.bucket_size;
         if (data_type_is_hidden(new->data_type))
-                fs_usage->hidden += ca->mi.bucket_size;
+                fs_usage->b.hidden += ca->mi.bucket_size;

         u = dev_usage_ptr(ca, journal_seq, gc);
@@ -359,7 +359,7 @@ static inline int __update_replicas(struct bch_fs *c,
         if (idx < 0)
                 return -1;

-        fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
+        fs_usage_data_type_to_base(&fs_usage->b, r->data_type, sectors);
         fs_usage->replicas[idx] += sectors;
         return 0;
 }
@@ -394,7 +394,7 @@ int bch2_update_replicas(struct bch_fs *c, struct bkey_s_c k,
         preempt_disable();
         fs_usage = fs_usage_ptr(c, journal_seq, gc);

-        fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
+        fs_usage_data_type_to_base(&fs_usage->b, r->data_type, sectors);
         fs_usage->replicas[idx] += sectors;
         preempt_enable();
 err:
@@ -677,11 +677,11 @@ void bch2_trans_fs_usage_revert(struct btree_trans *trans,
                 BUG_ON(__update_replicas(c, dst, &d->r, -d->delta));
         }

-        dst->nr_inodes -= deltas->nr_inodes;
+        dst->b.nr_inodes -= deltas->nr_inodes;

         for (i = 0; i < BCH_REPLICAS_MAX; i++) {
                 added -= deltas->persistent_reserved[i];
-                dst->reserved -= deltas->persistent_reserved[i];
+                dst->b.reserved -= deltas->persistent_reserved[i];
                 dst->persistent_reserved[i] -= deltas->persistent_reserved[i];
         }
@@ -723,11 +723,11 @@ int bch2_trans_fs_usage_apply(struct btree_trans *trans,
                         goto need_mark;
         }

-        dst->nr_inodes += deltas->nr_inodes;
+        dst->b.nr_inodes += deltas->nr_inodes;

         for (i = 0; i < BCH_REPLICAS_MAX; i++) {
                 added += deltas->persistent_reserved[i];
-                dst->reserved += deltas->persistent_reserved[i];
+                dst->b.reserved += deltas->persistent_reserved[i];
                 dst->persistent_reserved[i] += deltas->persistent_reserved[i];
         }
@@ -1084,7 +1084,7 @@ static int __trigger_reservation(struct btree_trans *trans,
                 struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage_gc);

                 replicas = min(replicas, ARRAY_SIZE(fs_usage->persistent_reserved));

-                fs_usage->reserved += sectors;
+                fs_usage->b.reserved += sectors;
                 fs_usage->persistent_reserved[replicas - 1] += sectors;
                 preempt_enable();

fs/bcachefs/buckets_types.h

@@ -45,23 +45,18 @@ struct bch_dev_usage {
         } d[BCH_DATA_NR];
 };

-struct bch_fs_usage {
-        /* all fields are in units of 512 byte sectors: */
+struct bch_fs_usage_base {
         u64                     hidden;
         u64                     btree;
         u64                     data;
         u64                     cached;
         u64                     reserved;
         u64                     nr_inodes;
+};

-        /* XXX: add stats for compression ratio */
-#if 0
-        u64                     uncompressed;
-        u64                     compressed;
-#endif
-
-        /* broken out: */
+struct bch_fs_usage {
+        /* all fields are in units of 512 byte sectors: */
+        struct bch_fs_usage_base b;
         u64                     persistent_reserved[BCH_REPLICAS_MAX];
         u64                     replicas[];
 };

fs/bcachefs/inode.c

@@ -597,7 +597,7 @@ int bch2_trigger_inode(struct btree_trans *trans,
                 struct bch_fs *c = trans->c;

                 percpu_down_read(&c->mark_lock);
-                this_cpu_add(c->usage_gc->nr_inodes, nr);
+                this_cpu_add(c->usage_gc->b.nr_inodes, nr);
                 percpu_up_read(&c->mark_lock);
         }

fs/bcachefs/recovery.c

@@ -280,7 +280,7 @@ static int journal_replay_entry_early(struct bch_fs *c,
                         le64_to_cpu(u->v);
                 break;
         case BCH_FS_USAGE_inodes:
-                c->usage_base->nr_inodes = le64_to_cpu(u->v);
+                c->usage_base->b.nr_inodes = le64_to_cpu(u->v);
                 break;
         case BCH_FS_USAGE_key_version:
                 atomic64_set(&c->key_version,

fs/bcachefs/sb-clean.c

@@ -207,7 +207,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
                 u->entry.type   = BCH_JSET_ENTRY_usage;
                 u->entry.btree_id = BCH_FS_USAGE_inodes;
-                u->v            = cpu_to_le64(c->usage_base->nr_inodes);
+                u->v            = cpu_to_le64(c->usage_base->b.nr_inodes);
         }

         {