bcachefs: journal_replay_early()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2019-01-24 19:09:49 -05:00 committed by Kent Overstreet
parent 3ccc5c50f2
commit 42b72e0ba2
7 changed files with 115 additions and 30 deletions

View file

@@ -1249,19 +1249,3 @@ int bch2_gc_thread_start(struct bch_fs *c)
        wake_up_process(p);
        return 0;
}

/* Initial GC computes bucket marks during startup */
int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
{
        int ret = bch2_gc(c, journal, true);

        /*
         * Skip past versions that might have possibly been used (as nonces),
         * but hadn't had their pointers written:
         */
        if (c->sb.encryption_type)
                atomic64_add(1 << 16, &c->key_version);

        return ret;
}

View file

@@ -8,7 +8,6 @@ void bch2_coalesce(struct bch_fs *);
int bch2_gc(struct bch_fs *, struct list_head *, bool);
void bch2_gc_thread_stop(struct bch_fs *);
int bch2_gc_thread_start(struct bch_fs *);
int bch2_initial_gc(struct bch_fs *, struct list_head *);
void bch2_mark_dev_superblock(struct bch_fs *, struct bch_dev *, unsigned);
/*

View file

@@ -475,6 +475,7 @@ struct btree_root {
        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
        u8 level;
        u8 alive;
        s8 error;
};
/*

View file

@@ -2122,7 +2122,6 @@ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
        BUG_ON(btree_node_root(c, b));

        __bch2_btree_set_root_inmem(c, b);
        bch2_btree_set_root_ondisk(c, b, READ);
}
void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)

View file

@@ -13,16 +13,17 @@
#include "journal_io.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"
#include <linux/stat.h>
#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
struct bkey_i *btree_root_find(struct bch_fs *c,
                               struct bch_sb_field_clean *clean,
                               struct jset *j,
                               enum btree_id id, unsigned *level)
static struct bkey_i *btree_root_find(struct bch_fs *c,
                                      struct bch_sb_field_clean *clean,
                                      struct jset *j,
                                      enum btree_id id, unsigned *level)
{
        struct bkey_i *k;
        struct jset_entry *entry, *start, *end;
@@ -50,6 +51,51 @@ struct bkey_i *btree_root_find(struct bch_fs *c,
        return k;
}
static int journal_replay_entry_early(struct bch_fs *c,
                                      struct jset_entry *entry)
{
        int ret = 0;

        switch (entry->type) {
        case BCH_JSET_ENTRY_btree_root: {
                struct btree_root *r = &c->btree_roots[entry->btree_id];

                if (entry->u64s) {
                        r->level = entry->level;
                        bkey_copy(&r->key, &entry->start[0]);
                        r->error = 0;
                } else {
                        r->error = -EIO;
                }
                r->alive = true;
                break;
        }
        case BCH_JSET_ENTRY_usage: {
                struct jset_entry_usage *u =
                        container_of(entry, struct jset_entry_usage, entry);

                switch (u->type) {
                case FS_USAGE_REPLICAS:
                        ret = bch2_replicas_set_usage(c, &u->r,
                                                      le64_to_cpu(u->sectors));
                        break;
                case FS_USAGE_INODES:
                        percpu_u64_set(&c->usage[0]->s.nr_inodes,
                                       le64_to_cpu(u->sectors));
                        break;
                case FS_USAGE_KEY_VERSION:
                        atomic64_set(&c->key_version,
                                     le64_to_cpu(u->sectors));
                        break;
                }
                break;
        }
        }

        return ret;
}
static int verify_superblock_clean(struct bch_fs *c,
                                   struct bch_sb_field_clean *clean,
                                   struct jset *j)
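The helper added in the hunk above is the core of the change: btree roots, replicas usage, the inode count, and the key version are now rebuilt directly from journal (or clean-superblock) summary entries before ordinary replay runs, instead of being recomputed from scratch. The sketch below is a minimal, standalone model of that dispatch pattern only; the entry types, fields, and state are invented for illustration and are not the bcachefs structures.

/*
 * Standalone model of the "replay early" idea: walk summary entries and
 * apply each one to in-memory state before normal journal replay.
 * Entry types, fields, and state below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

enum entry_type {
        ENTRY_BTREE_ROOT,
        ENTRY_USAGE_INODES,
        ENTRY_USAGE_KEY_VERSION,
};

struct entry {
        enum entry_type type;
        uint64_t        val;    /* root position, inode count, or key version */
};

struct fs_state {
        uint64_t btree_root;
        uint64_t nr_inodes;
        uint64_t key_version;
};

static int replay_entry_early(struct fs_state *fs, const struct entry *e)
{
        switch (e->type) {
        case ENTRY_BTREE_ROOT:
                fs->btree_root = e->val;
                break;
        case ENTRY_USAGE_INODES:
                fs->nr_inodes = e->val;
                break;
        case ENTRY_USAGE_KEY_VERSION:
                fs->key_version = e->val;
                break;
        }
        return 0;
}

int main(void)
{
        const struct entry journal[] = {
                { ENTRY_BTREE_ROOT,        128 },
                { ENTRY_USAGE_INODES,        7 },
                { ENTRY_USAGE_KEY_VERSION,  42 },
        };
        struct fs_state fs = { 0 };

        for (size_t i = 0; i < sizeof(journal) / sizeof(journal[0]); i++)
                if (replay_entry_early(&fs, &journal[i]))
                        return 1;

        printf("root %llu, inodes %llu, key_version %llu\n",
               (unsigned long long) fs.btree_root,
               (unsigned long long) fs.nr_inodes,
               (unsigned long long) fs.key_version);
        return 0;
}

The real helper additionally records per-root error state and per-replicas-entry sector counts, as the diff above shows.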
@@ -126,6 +172,7 @@ int bch2_fs_recovery(struct bch_fs *c)
{
        const char *err = "cannot allocate memory";
        struct bch_sb_field_clean *clean = NULL, *sb_clean = NULL;
        struct jset_entry *entry;
        LIST_HEAD(journal);
        struct jset *j = NULL;
        unsigned i;
@@ -178,28 +225,44 @@ int bch2_fs_recovery(struct bch_fs *c)
        fsck_err_on(clean && !journal_empty(&journal), c,
                    "filesystem marked clean but journal not empty");

        err = "insufficient memory";

        if (clean) {
                c->bucket_clock[READ].hand = le16_to_cpu(clean->read_clock);
                c->bucket_clock[WRITE].hand = le16_to_cpu(clean->write_clock);

                for (entry = clean->start;
                     entry != vstruct_end(&clean->field);
                     entry = vstruct_next(entry)) {
                        ret = journal_replay_entry_early(c, entry);
                        if (ret)
                                goto err;
                }
        } else {
                struct journal_replay *i;

                c->bucket_clock[READ].hand = le16_to_cpu(j->read_clock);
                c->bucket_clock[WRITE].hand = le16_to_cpu(j->write_clock);

                list_for_each_entry(i, &journal, list)
                        vstruct_for_each(&i->j, entry) {
                                ret = journal_replay_entry_early(c, entry);
                                if (ret)
                                        goto err;
                        }
        }

        for (i = 0; i < BTREE_ID_NR; i++) {
                unsigned level;
                struct bkey_i *k;
                struct btree_root *r = &c->btree_roots[i];

                k = btree_root_find(c, clean, j, i, &level);
                if (!k)
                if (!r->alive)
                        continue;

                err = "invalid btree root pointer";
                if (IS_ERR(k))
                if (r->error)
                        goto err;

                err = "error reading btree root";
                if (bch2_btree_root_read(c, i, k, level)) {
                if (bch2_btree_root_read(c, i, &r->key, r->level)) {
                        if (i != BTREE_ID_ALLOC)
                                goto err;
@@ -226,13 +289,20 @@ int bch2_fs_recovery(struct bch_fs *c)
        bch_verbose(c, "starting mark and sweep:");
        err = "error in recovery";
        ret = bch2_initial_gc(c, &journal);
        ret = bch2_gc(c, &journal, true);
        if (ret)
                goto err;
        bch_verbose(c, "mark and sweep done");

        clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);

        /*
         * Skip past versions that might have possibly been used (as nonces),
         * but hadn't had their pointers written:
         */
        if (c->sb.encryption_type && !c->sb.clean)
                atomic64_add(1 << 16, &c->key_version);

        if (c->opts.noreplay)
                goto out;
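A side note on the constant in the hunk above: the bump is 1 << 16 = 65536, so after an unclean shutdown with encryption enabled the key version jumps past the entire range of values that might already have been handed out as nonces without their pointers reaching disk (per the comment in the diff). A trivial standalone illustration; the constant comes from the diff, the starting value is made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t key_version = 123456;          /* hypothetical on-disk value */

        key_version += 1 << 16;                 /* skip 65536 possibly-used nonces */
        printf("%llu\n", (unsigned long long) key_version);
        return 0;
}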
@@ -319,7 +389,7 @@ int bch2_fs_initialize(struct bch_fs *c)
        for (i = 0; i < BTREE_ID_NR; i++)
                bch2_btree_root_alloc(c, i);

        ret = bch2_initial_gc(c, &journal);
        ret = bch2_gc(c, &journal, true);
        if (ret)
                goto err;

View file

@@ -530,6 +530,34 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
        return 0;
}
int bch2_replicas_set_usage(struct bch_fs *c,
                            struct bch_replicas_entry *r,
                            u64 sectors)
{
        int ret, idx = bch2_replicas_entry_idx(c, r);

        if (idx < 0) {
                struct bch_replicas_cpu n;

                n = cpu_replicas_add_entry(&c->replicas, r);
                if (!n.entries)
                        return -ENOMEM;

                ret = replicas_table_update(c, &n);
                if (ret)
                        return ret;

                kfree(n.entries);

                idx = bch2_replicas_entry_idx(c, r);
                BUG_ON(idx < 0);
        }

        percpu_u64_set(&c->usage[0]->data[idx], sectors);

        return 0;
}
/* Replicas tracking - superblock: */
static int
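For context on the helper added above: it follows a lookup-or-append pattern, i.e. find the replicas entry's slot, grow the table if the entry isn't known yet, then overwrite that slot's sector count with the value recorded in the journal. Below is a self-contained sketch of that pattern using a plain array instead of the real replicas machinery; all names and types here are invented for illustration.

#include <stdint.h>
#include <stdlib.h>

/* Toy stand-in for the replicas table: parallel arrays of entries and counters. */
struct usage_table {
        int      *entries;
        uint64_t *sectors;
        size_t    nr;
};

static int entry_idx(const struct usage_table *t, int e)
{
        for (size_t i = 0; i < t->nr; i++)
                if (t->entries[i] == e)
                        return (int) i;
        return -1;
}

/* Set the usage counter for an entry, appending the entry if it is unknown. */
static int set_usage(struct usage_table *t, int e, uint64_t sectors)
{
        int idx = entry_idx(t, e);

        if (idx < 0) {
                int *ne = realloc(t->entries, (t->nr + 1) * sizeof(*ne));
                if (!ne)
                        return -1;
                t->entries = ne;

                uint64_t *ns = realloc(t->sectors, (t->nr + 1) * sizeof(*ns));
                if (!ns)
                        return -1;
                t->sectors = ns;

                t->entries[t->nr] = e;
                idx = (int) t->nr++;
        }

        t->sectors[idx] = sectors;
        return 0;
}

int main(void)
{
        struct usage_table t = { 0 };

        return set_usage(&t, /* entry */ 3, /* sectors */ 4096) == 0 &&
               t.sectors[entry_idx(&t, 3)] == 4096 ? 0 : 1;
}

The real function differs in that the entries are variable-size replicas descriptors and the counters live in a percpu usage summary, as the diff suggests, but the control flow has the same shape.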

View file

@@ -57,6 +57,10 @@ unsigned bch2_dev_has_data(struct bch_fs *, struct bch_dev *);
int bch2_replicas_gc_end(struct bch_fs *, int);
int bch2_replicas_gc_start(struct bch_fs *, unsigned);
int bch2_replicas_set_usage(struct bch_fs *,
                            struct bch_replicas_entry *,
                            u64);
#define for_each_cpu_replicas_entry(_r, _i) \
        for (_i = (_r)->entries;                                        \
             (void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size;\