bcachefs: Fix oldest_gen handling

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet <kent.overstreet@linux.dev>, 2019-02-11 19:27:33 -05:00, committed by Kent Overstreet
parent 053dbb377d
commit 76f4c7b0c3
7 changed files with 43 additions and 98 deletions

View file

@@ -183,6 +183,7 @@ static void __alloc_read_key(struct bucket *g, const struct bch_alloc *a)
g->_mark.data_type = get_alloc_field(a, &d, idx++);
g->_mark.dirty_sectors = get_alloc_field(a, &d, idx++);
g->_mark.cached_sectors = get_alloc_field(a, &d, idx++);
g->oldest_gen = get_alloc_field(a, &d, idx++);
}
static void __alloc_write_key(struct bkey_i_alloc *a, struct bucket *g,
@@ -200,6 +201,7 @@ static void __alloc_write_key(struct bkey_i_alloc *a, struct bucket *g,
put_alloc_field(a, &d, idx++, m.data_type);
put_alloc_field(a, &d, idx++, m.dirty_sectors);
put_alloc_field(a, &d, idx++, m.cached_sectors);
put_alloc_field(a, &d, idx++, g->oldest_gen);
set_bkey_val_bytes(&a->k, (void *) d - (void *) &a->v);
}

View file

@@ -399,8 +399,6 @@ struct bch_dev {
struct bucket_array __rcu *buckets[2];
unsigned long *buckets_nouse;
unsigned long *buckets_written;
/* most out of date gen in the btree */
u8 *oldest_gens;
struct rw_semaphore bucket_lock;
struct bch_dev_usage __percpu *usage[2];

View file

@@ -830,7 +830,8 @@ struct bch_alloc {
x(write_time, 2) \
x(data_type, 1) \
x(dirty_sectors, 2) \
x(cached_sectors, 2)
x(cached_sectors, 2) \
x(oldest_gen, 1)
enum {
#define x(name, bytes) BCH_ALLOC_FIELD_##name,

View file

@@ -167,9 +167,10 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
size_t b = PTR_BUCKET_NR(ca, ptr);
struct bucket *g = __bucket(ca, b, true);
if (gen_after(ca->oldest_gens[b], ptr->gen))
ca->oldest_gens[b] = ptr->gen;
if (gen_after(g->oldest_gen, ptr->gen))
g->oldest_gen = ptr->gen;
*max_stale = max(*max_stale, ptr_stale(ca, ptr));
}
@@ -486,89 +487,38 @@ static void bch2_gc_free(struct bch_fs *c)
percpu_up_write(&c->mark_lock);
}
static void bch2_gc_done_nocheck(struct bch_fs *c)
{
struct bch_dev *ca;
unsigned i;
{
struct genradix_iter dst_iter = genradix_iter_init(&c->stripes[0], 0);
struct genradix_iter src_iter = genradix_iter_init(&c->stripes[1], 0);
struct stripe *dst, *src;
c->ec_stripes_heap.used = 0;
while ((dst = genradix_iter_peek(&dst_iter, &c->stripes[0])) &&
(src = genradix_iter_peek(&src_iter, &c->stripes[1]))) {
*dst = *src;
if (dst->alive)
bch2_stripes_heap_insert(c, dst, dst_iter.pos);
genradix_iter_advance(&dst_iter, &c->stripes[0]);
genradix_iter_advance(&src_iter, &c->stripes[1]);
}
}
for_each_member_device(ca, c, i) {
struct bucket_array *src = __bucket_array(ca, 1);
memcpy(__bucket_array(ca, 0), src,
sizeof(struct bucket_array) +
sizeof(struct bucket) * src->nbuckets);
};
for_each_member_device(ca, c, i) {
unsigned nr = sizeof(struct bch_dev_usage) / sizeof(u64);
struct bch_dev_usage *dst = (void *)
bch2_acc_percpu_u64s((void *) ca->usage[0], nr);
struct bch_dev_usage *src = (void *)
bch2_acc_percpu_u64s((void *) ca->usage[1], nr);
*dst = *src;
}
{
unsigned nr = sizeof(struct bch_fs_usage) / sizeof(u64) +
c->replicas.nr;
struct bch_fs_usage *dst = (void *)
bch2_acc_percpu_u64s((void *) c->usage[0], nr);
struct bch_fs_usage *src = (void *)
bch2_acc_percpu_u64s((void *) c->usage[1], nr);
unsigned offset = offsetof(typeof(*dst), s.gc_start);
memcpy((void *) dst + offset,
(void *) src + offset,
nr * sizeof(u64) - offset);
}
}
static void bch2_gc_done(struct bch_fs *c, bool initial)
{
struct bch_dev *ca;
bool verify = !initial ||
(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO));
unsigned i;
#define copy_field(_f, _msg, ...) \
if (dst->_f != src->_f) { \
bch_err(c, _msg ": got %llu, should be %llu, fixing" \
, ##__VA_ARGS__, dst->_f, src->_f); \
if (verify) \
bch_err(c, _msg ": got %llu, should be %llu, fixing"\
, ##__VA_ARGS__, dst->_f, src->_f); \
dst->_f = src->_f; \
}
#define copy_stripe_field(_f, _msg, ...) \
if (dst->_f != src->_f) { \
bch_err_ratelimited(c, "stripe %zu has wrong "_msg \
": got %u, should be %u, fixing", \
dst_iter.pos, ##__VA_ARGS__, \
dst->_f, src->_f); \
if (verify) \
bch_err_ratelimited(c, "stripe %zu has wrong "_msg\
": got %u, should be %u, fixing", \
dst_iter.pos, ##__VA_ARGS__, \
dst->_f, src->_f); \
dst->_f = src->_f; \
dst->dirty = true; \
}
#define copy_bucket_field(_f) \
if (dst->b[b].mark._f != src->b[b].mark._f) { \
bch_err_ratelimited(c, "dev %u bucket %zu has wrong " #_f\
": got %u, should be %u, fixing", \
i, b, dst->b[b].mark._f, src->b[b].mark._f); \
if (verify) \
bch_err_ratelimited(c, "dev %u bucket %zu has wrong " #_f\
": got %u, should be %u, fixing", i, b, \
dst->b[b].mark._f, src->b[b].mark._f); \
dst->b[b]._mark._f = src->b[b].mark._f; \
dst->b[b]._mark.dirty = true; \
}
#define copy_dev_field(_f, _msg, ...) \
copy_field(_f, "dev %u has wrong " _msg, i, ##__VA_ARGS__)
@@ -577,12 +527,6 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
percpu_down_write(&c->mark_lock);
if (initial &&
!(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO))) {
bch2_gc_done_nocheck(c);
goto out;
}
{
struct genradix_iter dst_iter = genradix_iter_init(&c->stripes[0], 0);
struct genradix_iter src_iter = genradix_iter_init(&c->stripes[1], 0);
@@ -633,6 +577,11 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
copy_bucket_field(stripe);
copy_bucket_field(dirty_sectors);
copy_bucket_field(cached_sectors);
if (dst->b[b].oldest_gen != src->b[b].oldest_gen) {
dst->b[b].oldest_gen = src->b[b].oldest_gen;
dst->b[b]._mark.dirty = true;
}
}
};
@@ -645,16 +594,16 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
unsigned b;
for (b = 0; b < BCH_DATA_NR; b++)
copy_dev_field(buckets[b],
"buckets[%s]", bch2_data_types[b]);
copy_dev_field(buckets_alloc, "buckets_alloc");
copy_dev_field(buckets_ec, "buckets_ec");
copy_dev_field(buckets[b], "buckets[%s]",
bch2_data_types[b]);
copy_dev_field(buckets_alloc, "buckets_alloc");
copy_dev_field(buckets_ec, "buckets_ec");
copy_dev_field(buckets_unavailable, "buckets_unavailable");
for (b = 0; b < BCH_DATA_NR; b++)
copy_dev_field(sectors[b],
"sectors[%s]", bch2_data_types[b]);
copy_dev_field(sectors_fragmented,
"sectors_fragmented");
copy_dev_field(sectors[b], "sectors[%s]",
bch2_data_types[b]);
copy_dev_field(sectors_fragmented, "sectors_fragmented");
}
{
@@ -682,7 +631,7 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
copy_fs_field(data[i], "data[%i]", i);
}
}
out:
percpu_up_write(&c->mark_lock);
#undef copy_fs_field
@@ -745,7 +694,9 @@ static int bch2_gc_start(struct bch_fs *c)
dst->nbuckets = src->nbuckets;
for (b = 0; b < src->nbuckets; b++)
dst->b[b]._mark.gen = src->b[b].mark.gen;
dst->b[b]._mark.gen =
dst->b[b].oldest_gen =
src->b[b].mark.gen;
};
percpu_up_write(&c->mark_lock);

View file

@@ -1148,7 +1148,6 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
struct bucket_array *buckets = NULL, *old_buckets = NULL;
unsigned long *buckets_nouse = NULL;
unsigned long *buckets_written = NULL;
u8 *oldest_gens = NULL;
alloc_fifo free[RESERVE_NR];
alloc_fifo free_inc;
alloc_heap alloc_heap;
@@ -1174,8 +1173,6 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
nbuckets * sizeof(struct bucket),
GFP_KERNEL|__GFP_ZERO)) ||
!(oldest_gens = kvpmalloc(nbuckets * sizeof(u8),
GFP_KERNEL|__GFP_ZERO)) ||
!(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
sizeof(unsigned long),
GFP_KERNEL|__GFP_ZERO)) ||
@@ -1210,9 +1207,6 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
memcpy(buckets->b,
old_buckets->b,
n * sizeof(struct bucket));
memcpy(oldest_gens,
ca->oldest_gens,
n * sizeof(u8));
memcpy(buckets_nouse,
ca->buckets_nouse,
BITS_TO_LONGS(n) * sizeof(unsigned long));
@@ -1224,7 +1218,6 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
rcu_assign_pointer(ca->buckets[0], buckets);
buckets = old_buckets;
swap(ca->oldest_gens, oldest_gens);
swap(ca->buckets_nouse, buckets_nouse);
swap(ca->buckets_written, buckets_written);
@@ -1268,8 +1261,6 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
kvpfree(buckets_written,
BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
kvpfree(oldest_gens,
nbuckets * sizeof(u8));
if (buckets)
call_rcu(&old_buckets->rcu, buckets_free_rcu);
@@ -1289,7 +1280,6 @@ void bch2_dev_buckets_free(struct bch_dev *ca)
BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
kvpfree(ca->buckets_nouse,
BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
kvpfree(ca->oldest_gens, ca->mi.nbuckets * sizeof(u8));
kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
sizeof(struct bucket_array) +
ca->mi.nbuckets * sizeof(struct bucket));

View file

@@ -87,7 +87,9 @@ static inline u16 bucket_last_io(struct bch_fs *c, struct bucket *g, int rw)
static inline u8 bucket_gc_gen(struct bch_dev *ca, size_t b)
{
return bucket(ca, b)->mark.gen - ca->oldest_gens[b];
struct bucket *g = bucket(ca, b);
return g->mark.gen - g->oldest_gen;
}
static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,

View file

@@ -39,6 +39,7 @@ struct bucket {
};
u16 io_time[2];
u8 oldest_gen;
unsigned gen_valid:1;
};