bcachefs: Use x-macros for data types

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit 89fd25be70
parent 912bdf17a8
Author:    Kent Overstreet <kent.overstreet@linux.dev>
Date:      2020-07-09 18:28:11 -04:00
Committer: Kent Overstreet

21 changed files with 110 additions and 108 deletions
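
The change throughout is mechanical: BCH_DATA_SB becomes BCH_DATA_sb, BCH_DATA_USER becomes BCH_DATA_user, and so on, with the enum itself now generated from a single x-macro list. The x-macro pattern defines the (name, value) list once and expands it with different definitions of x() to generate both the enum and the matching string table, so the two can never drift out of sync. A minimal standalone sketch of the pattern (identifiers mirror the diff below; the program is illustrative, not kernel code):

	#include <stdio.h>

	#define BCH_DATA_TYPES()	\
		x(none,		0)	\
		x(sb,		1)	\
		x(journal,	2)	\
		x(btree,	3)	\
		x(user,		4)	\
		x(cached,	5)

	enum bch_data_type {
	#define x(t, n) BCH_DATA_##t,
		BCH_DATA_TYPES()
	#undef x
		BCH_DATA_NR
	};

	static const char * const bch2_data_types[] = {
	#define x(t, n) #t,
		BCH_DATA_TYPES()
	#undef x
		NULL
	};

	int main(void)
	{
		/* The string table indexes by enum value: prints "user". */
		printf("%s\n", bch2_data_types[BCH_DATA_user]);
		return 0;
	}

Adding a new data type to BCH_DATA_TYPES() now updates the enum and the name table together.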

@@ -53,10 +53,10 @@ static void pd_controllers_update(struct work_struct *work)
 	 * reclaimed by copy GC
 	 */
 	s64 fragmented = (bucket_to_sector(ca,
-				stats.buckets[BCH_DATA_USER] +
-				stats.buckets[BCH_DATA_CACHED]) -
-			  (stats.sectors[BCH_DATA_USER] +
-			   stats.sectors[BCH_DATA_CACHED])) << 9;
+				stats.buckets[BCH_DATA_user] +
+				stats.buckets[BCH_DATA_cached]) -
+			  (stats.sectors[BCH_DATA_user] +
+			   stats.sectors[BCH_DATA_cached])) << 9;
 	fragmented = max(0LL, fragmented);

@@ -534,7 +534,7 @@ static void get_buckets_from_writepoint(struct bch_fs *c,
 		if (*nr_effective < nr_replicas &&
 		    test_bit(ob->ptr.dev, devs_may_alloc->d) &&
 		    (ca->mi.durability ||
-		     (wp->type == BCH_DATA_USER && !*have_cache)) &&
+		     (wp->type == BCH_DATA_user && !*have_cache)) &&
 		    (ob->ec || !need_ec)) {
 			add_new_bucket(c, ptrs, devs_may_alloc,
 				       nr_effective, have_cache,
@@ -813,11 +813,11 @@ struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
 	wp = writepoint_find(c, write_point.v);
 
-	if (wp->type == BCH_DATA_USER)
+	if (wp->type == BCH_DATA_user)
 		ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
 
 	/* metadata may not allocate on cache devices: */
-	if (wp->type != BCH_DATA_USER)
+	if (wp->type != BCH_DATA_user)
 		have_cache = true;
 
 	if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
@@ -856,7 +856,7 @@ struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
 
 	/* Free buckets we didn't use: */
 	open_bucket_for_each(c, &wp->ptrs, ob, i)
-		open_bucket_free_unused(c, ob, wp->type == BCH_DATA_USER);
+		open_bucket_free_unused(c, ob, wp->type == BCH_DATA_user);
 
 	wp->ptrs = ptrs;
@@ -876,7 +876,7 @@ struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
 			ob_push(c, &ptrs, ob);
 		else
 			open_bucket_free_unused(c, ob,
-						wp->type == BCH_DATA_USER);
+						wp->type == BCH_DATA_user);
 	wp->ptrs = ptrs;
 
 	mutex_unlock(&wp->lock);
@@ -907,7 +907,7 @@ void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
 		struct bch_extent_ptr tmp = ob->ptr;
 
 		tmp.cached = !ca->mi.durability &&
-			wp->type == BCH_DATA_USER;
+			wp->type == BCH_DATA_user;
 
 		tmp.offset += ca->mi.bucket_size - ob->sectors_free;
 		bch2_bkey_append_ptr(k, tmp);
@@ -956,12 +956,12 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c)
 		c->open_buckets_freelist = ob - c->open_buckets;
 	}
 
-	writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
-	writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);
+	writepoint_init(&c->btree_write_point, BCH_DATA_btree);
+	writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
 
 	for (wp = c->write_points;
 	     wp < c->write_points + c->write_points_nr; wp++) {
-		writepoint_init(wp, BCH_DATA_USER);
+		writepoint_init(wp, BCH_DATA_user);
 
 		wp->last_used = sched_clock();
 		wp->write_point = (unsigned long) wp;

@@ -1030,14 +1030,19 @@ LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48);
 
 /* BCH_SB_FIELD_replicas: */
 
+#define BCH_DATA_TYPES()	\
+	x(none,		0)	\
+	x(sb,		1)	\
+	x(journal,	2)	\
+	x(btree,	3)	\
+	x(user,		4)	\
+	x(cached,	5)
+
 enum bch_data_type {
-	BCH_DATA_NONE		= 0,
-	BCH_DATA_SB		= 1,
-	BCH_DATA_JOURNAL	= 2,
-	BCH_DATA_BTREE		= 3,
-	BCH_DATA_USER		= 4,
-	BCH_DATA_CACHED		= 5,
-	BCH_DATA_NR		= 6,
+#define x(t, n) BCH_DATA_##t,
	BCH_DATA_TYPES()
+#undef x
+	BCH_DATA_NR
 };
 
 struct bch_replicas_entry_v0 {
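
After preprocessing, the generated enum is identical to the hand-written one it replaces apart from the lower-cased names. A sketch of the expansion:

	enum bch_data_type {
		BCH_DATA_none,		/* = 0 */
		BCH_DATA_sb,		/* = 1 */
		BCH_DATA_journal,	/* = 2 */
		BCH_DATA_btree,		/* = 3 */
		BCH_DATA_user,		/* = 4 */
		BCH_DATA_cached,	/* = 5 */
		BCH_DATA_NR		/* = 6 */
	};

The explicit "= n" initializers can be dropped because C enumerators count up from zero, which matches the values in the x() list; the n argument is unused by this expansion and documents the value each name corresponds to.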

@@ -435,16 +435,16 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
 		if (offset == BCH_SB_SECTOR)
 			mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
-					      BCH_DATA_SB, flags);
+					      BCH_DATA_sb, flags);
 
 		mark_metadata_sectors(c, ca, offset,
 				      offset + (1 << layout->sb_max_size_bits),
-				      BCH_DATA_SB, flags);
+				      BCH_DATA_sb, flags);
 	}
 
 	for (i = 0; i < ca->journal.nr; i++) {
 		b = ca->journal.buckets[i];
-		bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_JOURNAL,
+		bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
 					  ca->mi.bucket_size,
 					  gc_phase(GC_PHASE_SB), flags);
 	}
@@ -678,8 +678,8 @@ static int bch2_gc_done(struct bch_fs *c,
 			char buf[80];
 
 			if (metadata_only &&
-			    (e->data_type == BCH_DATA_USER ||
-			     e->data_type == BCH_DATA_CACHED))
+			    (e->data_type == BCH_DATA_user ||
+			     e->data_type == BCH_DATA_cached))
 				continue;
 
 			bch2_replicas_entry_to_text(&PBUF(buf), e);
@@ -764,8 +764,8 @@ static int bch2_gc_start(struct bch_fs *c,
 		d->gen_valid = s->gen_valid;
 
 		if (metadata_only &&
-		    (s->mark.data_type == BCH_DATA_USER ||
-		     s->mark.data_type == BCH_DATA_CACHED)) {
+		    (s->mark.data_type == BCH_DATA_user ||
+		     s->mark.data_type == BCH_DATA_cached)) {
 			d->_mark = s->mark;
 			d->_mark.owned_by_allocator = 0;
 		}

@@ -1231,7 +1231,7 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 	set_btree_node_read_in_flight(b);
 
 	if (rb->have_ioref) {
-		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_BTREE],
+		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
 			     bio_sectors(bio));
 		bio_set_dev(bio, ca->disk_sb.bdev);
@@ -1701,7 +1701,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
 	b->written += sectors_to_write;
 
 	/* XXX: submitting IO with btree locks held: */
-	bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_BTREE, &k.key);
+	bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_btree, &k.key);
 	return;
 err:
 	set_btree_node_noevict(b);

@@ -133,13 +133,13 @@ void bch2_fs_usage_initialize(struct bch_fs *c)
 			cpu_replicas_entry(&c->replicas, i);
 
 		switch (e->data_type) {
-		case BCH_DATA_BTREE:
+		case BCH_DATA_btree:
 			usage->btree	+= usage->replicas[i];
 			break;
-		case BCH_DATA_USER:
+		case BCH_DATA_user:
 			usage->data	+= usage->replicas[i];
 			break;
-		case BCH_DATA_CACHED:
+		case BCH_DATA_cached:
 			usage->cached	+= usage->replicas[i];
 			break;
 		}
@@ -367,7 +367,7 @@ static inline int is_fragmented_bucket(struct bucket_mark m,
 				       struct bch_dev *ca)
 {
 	if (!m.owned_by_allocator &&
-	    m.data_type == BCH_DATA_USER &&
+	    m.data_type == BCH_DATA_user &&
 	    bucket_sectors_used(m))
 		return max_t(int, 0, (int) ca->mi.bucket_size -
 			     bucket_sectors_used(m));
@@ -382,7 +382,7 @@ static inline int bucket_stripe_sectors(struct bucket_mark m)
 static inline enum bch_data_type bucket_type(struct bucket_mark m)
 {
 	return m.cached_sectors && !m.dirty_sectors
-		? BCH_DATA_CACHED
+		? BCH_DATA_cached
 		: m.data_type;
 }
@@ -437,7 +437,7 @@ static inline void account_bucket(struct bch_fs_usage *fs_usage,
 				  enum bch_data_type type,
 				  int nr, s64 size)
 {
-	if (type == BCH_DATA_SB || type == BCH_DATA_JOURNAL)
+	if (type == BCH_DATA_sb || type == BCH_DATA_journal)
 		fs_usage->hidden	+= size;
 
 	dev_usage->buckets[type]	+= nr;
@@ -472,7 +472,7 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 
 	u->sectors[old.data_type] -= old.dirty_sectors;
 	u->sectors[new.data_type] += new.dirty_sectors;
-	u->sectors[BCH_DATA_CACHED] +=
+	u->sectors[BCH_DATA_cached] +=
 		(int) new.cached_sectors - (int) old.cached_sectors;
 	u->sectors_fragmented +=
 		is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
@@ -520,13 +520,13 @@ static inline int update_replicas(struct bch_fs *c,
 		return 0;
 
 	switch (r->data_type) {
-	case BCH_DATA_BTREE:
+	case BCH_DATA_btree:
 		fs_usage->btree		+= sectors;
 		break;
-	case BCH_DATA_USER:
+	case BCH_DATA_user:
 		fs_usage->data		+= sectors;
 		break;
-	case BCH_DATA_CACHED:
+	case BCH_DATA_cached:
 		fs_usage->cached	+= sectors;
 		break;
 	}
@@ -798,8 +798,8 @@ static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 	struct bucket_mark old, new;
 	bool overflow;
 
-	BUG_ON(data_type != BCH_DATA_SB &&
-	       data_type != BCH_DATA_JOURNAL);
+	BUG_ON(data_type != BCH_DATA_sb &&
+	       data_type != BCH_DATA_journal);
 
 	old = bucket_cmpxchg(g, new, ({
 		new.data_type	= data_type;
@@ -830,8 +830,8 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 			       unsigned sectors, struct gc_pos pos,
 			       unsigned flags)
 {
-	BUG_ON(type != BCH_DATA_SB &&
-	       type != BCH_DATA_JOURNAL);
+	BUG_ON(type != BCH_DATA_sb &&
+	       type != BCH_DATA_journal);
 
 	preempt_disable();
@@ -1123,7 +1123,7 @@ static int bch2_mark_extent(struct bch_fs *c,
 	BUG_ON(!sectors);
 
 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-		s64 disk_sectors = data_type == BCH_DATA_BTREE
+		s64 disk_sectors = data_type == BCH_DATA_btree
 			? sectors
 			: ptr_disk_sectors_delta(p, offset, sectors, flags);
@@ -1285,12 +1285,12 @@ static int bch2_mark_key_locked(struct bch_fs *c,
 			: -c->opts.btree_node_size;
 
 		ret = bch2_mark_extent(c, old, new, offset, sectors,
-				       BCH_DATA_BTREE, fs_usage, journal_seq, flags);
+				       BCH_DATA_btree, fs_usage, journal_seq, flags);
 		break;
 	case KEY_TYPE_extent:
 	case KEY_TYPE_reflink_v:
 		ret = bch2_mark_extent(c, old, new, offset, sectors,
-				       BCH_DATA_USER, fs_usage, journal_seq, flags);
+				       BCH_DATA_user, fs_usage, journal_seq, flags);
 		break;
 	case KEY_TYPE_stripe:
 		ret = bch2_mark_stripe(c, old, new, fs_usage, journal_seq, flags);
@@ -1668,7 +1668,7 @@ static int bch2_trans_mark_extent(struct btree_trans *trans,
 	BUG_ON(!sectors);
 
 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-		s64 disk_sectors = data_type == BCH_DATA_BTREE
+		s64 disk_sectors = data_type == BCH_DATA_btree
 			? sectors
 			: ptr_disk_sectors_delta(p, offset, sectors, flags);
@@ -1810,11 +1810,11 @@ int bch2_trans_mark_key(struct btree_trans *trans, struct bkey_s_c k,
 			: -c->opts.btree_node_size;
 
 		return bch2_trans_mark_extent(trans, k, offset, sectors,
-					      flags, BCH_DATA_BTREE);
+					      flags, BCH_DATA_btree);
 	case KEY_TYPE_extent:
 	case KEY_TYPE_reflink_v:
 		return bch2_trans_mark_extent(trans, k, offset, sectors,
-					      flags, BCH_DATA_USER);
+					      flags, BCH_DATA_user);
 	case KEY_TYPE_inode:
 		d = replicas_deltas_realloc(trans, 0);

@@ -99,9 +99,9 @@ static inline enum bch_data_type ptr_data_type(const struct bkey *k,
 {
 	if (k->type == KEY_TYPE_btree_ptr ||
 	    k->type == KEY_TYPE_btree_ptr_v2)
-		return BCH_DATA_BTREE;
+		return BCH_DATA_btree;
 
-	return ptr->cached ? BCH_DATA_CACHED : BCH_DATA_USER;
+	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
 }
 
 static inline struct bucket_mark ptr_bucket_mark(struct bch_dev *ca,

@@ -1144,7 +1144,7 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
 	h->redundancy	= redundancy;
 
 	rcu_read_lock();
-	h->devs = target_rw_devs(c, BCH_DATA_USER, target);
+	h->devs = target_rw_devs(c, BCH_DATA_user, target);
 
 	for_each_member_device_rcu(ca, c, i, &h->devs)
 		if (!ca->mi.durability)

@@ -193,7 +193,7 @@ void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
 			goto err;
 
 		err = "inconsistent";
-		if (mark.data_type != BCH_DATA_BTREE ||
+		if (mark.data_type != BCH_DATA_btree ||
 		    mark.dirty_sectors < c->opts.btree_node_size)
 			goto err;
 	}
@@ -288,7 +288,7 @@ void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
 				"key too stale: %i", stale);
 
 		bch2_fs_inconsistent_on(!stale &&
-			(mark.data_type != BCH_DATA_USER ||
+			(mark.data_type != BCH_DATA_user ||
 			 mark_sectors < disk_sectors), c,
 			"extent pointer not marked: %s:\n"
 			"type %u sectors %u < %u",

@@ -486,7 +486,7 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
 
 		bio_set_dev(&n->bio, ca->disk_sb.bdev);
 
-		if (type != BCH_DATA_BTREE && unlikely(c->opts.no_data_io)) {
+		if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
 			bio_endio(&n->bio);
 			continue;
 		}
@@ -1128,7 +1128,7 @@ static void __bch2_write(struct closure *cl)
 		key_to_write = (void *) (op->insert_keys.keys_p +
 					 key_to_write_offset);
 
-		bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_USER,
+		bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
 					  key_to_write);
 	} while (ret);
@@ -2170,7 +2170,7 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 		goto out;
 	}
 
-	this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_USER],
+	this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_user],
 		     bio_sectors(&rbio->bio));
 
 	bio_set_dev(&rbio->bio, ca->disk_sb.bdev);

@@ -846,7 +846,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
 		if (pos <= ja->cur_idx)
 			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
 
-		bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_JOURNAL,
+		bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_journal,
 					  ca->mi.bucket_size,
 					  gc_phase(GC_PHASE_SB),
 					  0);
@@ -1198,7 +1198,7 @@ ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
 		 test_bit(JOURNAL_REPLAY_DONE, &j->flags));
 
 	for_each_member_device_rcu(ca, c, iter,
-				   &c->rw_devs[BCH_DATA_JOURNAL]) {
+				   &c->rw_devs[BCH_DATA_journal]) {
 		struct journal_device *ja = &ca->journal;
 
 		if (!ja->nr)

@@ -660,7 +660,7 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list)
 
 	for_each_member_device(ca, c, iter) {
 		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
-		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_JOURNAL)))
+		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
 			continue;
 
 		if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
@@ -694,7 +694,7 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list)
 		 * the devices - this is wrong:
 		 */
 
-		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, i->devs);
+		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, i->devs);
 
 		if (!degraded &&
 		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
@@ -795,7 +795,7 @@ static int journal_write_alloc(struct journal *j, struct journal_buf *w,
 	rcu_read_lock();
 
 	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe,
-					  &c->rw_devs[BCH_DATA_JOURNAL]);
+					  &c->rw_devs[BCH_DATA_journal]);
 
 	__journal_write_alloc(j, w, &devs_sorted,
 			      sectors, &replicas, replicas_want);
@@ -913,7 +913,7 @@ static void journal_write_done(struct closure *cl)
 		goto err;
 	}
 
-	bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, devs);
+	bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, devs);
 
 	if (bch2_mark_replicas(c, &replicas.e))
 		goto err;
@@ -1105,7 +1105,7 @@ void bch2_journal_write(struct closure *cl)
 			continue;
 		}
 
-		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_JOURNAL],
+		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
 			     sectors);
 
 		bio = ca->journal.bio;

@@ -70,7 +70,7 @@ static struct journal_space {
 
 	rcu_read_lock();
 	for_each_member_device_rcu(ca, c, i,
-				   &c->rw_devs[BCH_DATA_JOURNAL]) {
+				   &c->rw_devs[BCH_DATA_journal]) {
 		struct journal_device *ja = &ca->journal;
 		unsigned buckets_this_device, sectors_this_device;
@@ -139,7 +139,7 @@ void bch2_journal_space_available(struct journal *j)
 
 	rcu_read_lock();
 	for_each_member_device_rcu(ca, c, i,
-				   &c->rw_devs[BCH_DATA_JOURNAL]) {
+				   &c->rw_devs[BCH_DATA_journal]) {
 		struct journal_device *ja = &ca->journal;
 
 		if (!ja->nr)
@@ -618,7 +618,7 @@ int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
 		return ret;
 
 	mutex_lock(&c->replicas_gc_lock);
-	bch2_replicas_gc_start(c, 1 << BCH_DATA_JOURNAL);
+	bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);
 
 	seq = 0;
@@ -627,7 +627,7 @@ int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
 		struct bch_replicas_padded replicas;
 
 		seq = max(seq, journal_last_seq(j));
-		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL,
+		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
 					 journal_seq_pin(j, seq)->devs);
 		seq++;

@@ -516,7 +516,7 @@ static int __bch2_move_data(struct bch_fs *c,
 	bkey_on_stack_init(&sk);
 	bch2_trans_init(&trans, c, 0, 0);
 
-	stats->data_type = BCH_DATA_USER;
+	stats->data_type = BCH_DATA_user;
 	stats->btree_id	= btree_id;
 	stats->pos	= POS_MIN;
@@ -641,7 +641,7 @@ int bch2_move_data(struct bch_fs *c,
 	INIT_LIST_HEAD(&ctxt.reads);
 	init_waitqueue_head(&ctxt.wait);
 
-	stats->data_type = BCH_DATA_USER;
+	stats->data_type = BCH_DATA_user;
 
 	ret = __bch2_move_data(c, &ctxt, rate, wp, start, end,
 			       pred, arg, stats, BTREE_ID_EXTENTS) ?:
@@ -676,7 +676,7 @@ static int bch2_move_btree(struct bch_fs *c,
 	bch2_trans_init(&trans, c, 0, 0);
 
-	stats->data_type = BCH_DATA_BTREE;
+	stats->data_type = BCH_DATA_btree;
 
 	for (id = 0; id < BTREE_ID_NR; id++) {
 		stats->btree_id = id;
@@ -772,7 +772,7 @@ int bch2_data_job(struct bch_fs *c,
 	switch (op.op) {
 	case BCH_DATA_OP_REREPLICATE:
-		stats->data_type = BCH_DATA_JOURNAL;
+		stats->data_type = BCH_DATA_journal;
 		ret = bch2_journal_flush_device_pins(&c->journal, -1);
 
 		ret = bch2_move_btree(c, rereplicate_pred, c, stats) ?: ret;
@@ -793,7 +793,7 @@ int bch2_data_job(struct bch_fs *c,
 		if (op.migrate.dev >= c->sb.nr_devices)
 			return -EINVAL;
 
-		stats->data_type = BCH_DATA_JOURNAL;
+		stats->data_type = BCH_DATA_journal;
 		ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);
 
 		ret = bch2_move_btree(c, migrate_pred, &op, stats) ?: ret;

@@ -160,7 +160,7 @@ static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
 		struct copygc_heap_entry e;
 
 		if (m.owned_by_allocator ||
-		    m.data_type != BCH_DATA_USER ||
+		    m.data_type != BCH_DATA_user ||
 		    !bucket_sectors_used(m) ||
 		    bucket_sectors_used(m) >= ca->mi.bucket_size)
 			continue;

@@ -45,12 +45,9 @@ const char * const bch2_str_hash_types[] = {
 };
 
 const char * const bch2_data_types[] = {
-	"none",
-	"sb",
-	"journal",
-	"btree",
-	"data",
-	"cached",
+#define x(t, n) #t,
+	BCH_DATA_TYPES()
+#undef x
 	NULL
 };
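
One subtle functional change rides along in this hunk: the old table named BCH_DATA_USER "data", while the x-macro stringizes the enum name, so the entry becomes "user". Expanded, the new initializer is equivalent to:

	const char * const bch2_data_types[] = {
		"none", "sb", "journal", "btree", "user", "cached",
		NULL
	};

The NULL sentinel stays hand-written, since it terminates the array rather than naming a data type.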

@@ -113,16 +113,16 @@ void bch2_bkey_to_replicas(struct bch_replicas_entry *e,
 	switch (k.k->type) {
 	case KEY_TYPE_btree_ptr:
 	case KEY_TYPE_btree_ptr_v2:
-		e->data_type = BCH_DATA_BTREE;
+		e->data_type = BCH_DATA_btree;
 		extent_to_replicas(k, e);
 		break;
 	case KEY_TYPE_extent:
 	case KEY_TYPE_reflink_v:
-		e->data_type = BCH_DATA_USER;
+		e->data_type = BCH_DATA_user;
 		extent_to_replicas(k, e);
 		break;
 	case KEY_TYPE_stripe:
-		e->data_type = BCH_DATA_USER;
+		e->data_type = BCH_DATA_user;
 		stripe_to_replicas(k, e);
 		break;
 	}
@@ -137,7 +137,7 @@ void bch2_devlist_to_replicas(struct bch_replicas_entry *e,
 	unsigned i;
 
 	BUG_ON(!data_type ||
-	       data_type == BCH_DATA_SB ||
+	       data_type == BCH_DATA_sb ||
 	       data_type >= BCH_DATA_NR);
 
 	e->data_type = data_type;
@@ -614,7 +614,7 @@ int bch2_replicas_gc2(struct bch_fs *c)
 		struct bch_replicas_entry *e =
 			cpu_replicas_entry(&c->replicas, i);
 
-		if (e->data_type == BCH_DATA_JOURNAL ||
+		if (e->data_type == BCH_DATA_journal ||
 		    c->usage_base->replicas[i] ||
 		    percpu_u64_get(&c->usage[0]->replicas[i]) ||
 		    percpu_u64_get(&c->usage[1]->replicas[i]))
@@ -1040,13 +1040,13 @@ static bool have_enough_devs(struct replicas_status s,
 
 bool bch2_have_enough_devs(struct replicas_status s, unsigned flags)
 {
-	return (have_enough_devs(s, BCH_DATA_JOURNAL,
+	return (have_enough_devs(s, BCH_DATA_journal,
 				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
 				 flags & BCH_FORCE_IF_METADATA_LOST) &&
-		have_enough_devs(s, BCH_DATA_BTREE,
+		have_enough_devs(s, BCH_DATA_btree,
 				 flags & BCH_FORCE_IF_METADATA_DEGRADED,
 				 flags & BCH_FORCE_IF_METADATA_LOST) &&
-		have_enough_devs(s, BCH_DATA_USER,
+		have_enough_devs(s, BCH_DATA_user,
 				 flags & BCH_FORCE_IF_DATA_DEGRADED,
 				 flags & BCH_FORCE_IF_DATA_LOST));
 }
@@ -1056,9 +1056,9 @@ int bch2_replicas_online(struct bch_fs *c, bool meta)
 	struct replicas_status s = bch2_replicas_status(c);
 
 	return (meta
-		? min(s.replicas[BCH_DATA_JOURNAL].redundancy,
-		      s.replicas[BCH_DATA_BTREE].redundancy)
-		: s.replicas[BCH_DATA_USER].redundancy) + 1;
+		? min(s.replicas[BCH_DATA_journal].redundancy,
+		      s.replicas[BCH_DATA_btree].redundancy)
+		: s.replicas[BCH_DATA_user].redundancy) + 1;
 }
 
 unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)

@@ -36,7 +36,7 @@ int bch2_mark_bkey_replicas(struct bch_fs *, struct bkey_s_c);
 static inline void bch2_replicas_entry_cached(struct bch_replicas_entry *e,
 					      unsigned dev)
 {
-	e->data_type	= BCH_DATA_CACHED;
+	e->data_type	= BCH_DATA_cached;
 	e->nr_devs	= 1;
 	e->nr_required	= 1;
 	e->devs[0]	= dev;

@@ -659,7 +659,7 @@ static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
 	bio->bi_private		= ca;
 	bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE);
 
-	this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_SB],
+	this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb],
 		     bio_sectors(bio));
 
 	percpu_ref_get(&ca->io_ref);
@@ -685,7 +685,7 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
 		     roundup((size_t) vstruct_bytes(sb),
 			     bdev_logical_block_size(ca->disk_sb.bdev)));
 
-	this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_SB],
+	this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_sb],
 		     bio_sectors(bio));
 
 	percpu_ref_get(&ca->io_ref);

@@ -1076,7 +1076,7 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
 	init_rwsem(&ca->bucket_lock);
 
-	writepoint_init(&ca->copygc_write_point, BCH_DATA_USER);
+	writepoint_init(&ca->copygc_write_point, BCH_DATA_user);
 
 	bch2_dev_copygc_init(ca);
@@ -1207,7 +1207,7 @@ static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
 		return ret;
 
 	if (test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags) &&
-	    !percpu_u64_get(&ca->usage[0]->buckets[BCH_DATA_SB])) {
+	    !percpu_u64_get(&ca->usage[0]->buckets[BCH_DATA_sb])) {
 		mutex_lock(&c->sb_lock);
 		bch2_mark_dev_superblock(ca->fs, ca, 0);
 		mutex_unlock(&c->sb_lock);

@@ -868,18 +868,18 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
 		fifo_used(&ca->free[RESERVE_MOVINGGC]),	ca->free[RESERVE_MOVINGGC].size,
 		fifo_used(&ca->free[RESERVE_NONE]),	ca->free[RESERVE_NONE].size,
 		ca->mi.nbuckets - ca->mi.first_bucket,
-		stats.buckets[BCH_DATA_SB],
-		stats.buckets[BCH_DATA_JOURNAL],
-		stats.buckets[BCH_DATA_BTREE],
-		stats.buckets[BCH_DATA_USER],
-		stats.buckets[BCH_DATA_CACHED],
+		stats.buckets[BCH_DATA_sb],
+		stats.buckets[BCH_DATA_journal],
+		stats.buckets[BCH_DATA_btree],
+		stats.buckets[BCH_DATA_user],
+		stats.buckets[BCH_DATA_cached],
 		stats.buckets_ec,
 		ca->mi.nbuckets - ca->mi.first_bucket - stats.buckets_unavailable,
-		stats.sectors[BCH_DATA_SB],
-		stats.sectors[BCH_DATA_JOURNAL],
-		stats.sectors[BCH_DATA_BTREE],
-		stats.sectors[BCH_DATA_USER],
-		stats.sectors[BCH_DATA_CACHED],
+		stats.sectors[BCH_DATA_sb],
+		stats.sectors[BCH_DATA_journal],
+		stats.sectors[BCH_DATA_btree],
+		stats.sectors[BCH_DATA_user],
+		stats.sectors[BCH_DATA_cached],
 		stats.sectors_ec,
 		stats.sectors_fragmented,
 		ca->copygc_threshold,
@@ -887,8 +887,8 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
 		c->open_buckets_nr_free, OPEN_BUCKETS_COUNT,
 		BTREE_NODE_OPEN_BUCKET_RESERVE,
 		c->open_buckets_wait.list.first ? "waiting" : "empty",
-		nr[BCH_DATA_BTREE],
-		nr[BCH_DATA_USER],
+		nr[BCH_DATA_btree],
+		nr[BCH_DATA_user],
 		c->btree_reserve_cache_nr);
 }