bcachefs: move dirty into bucket_mark

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Author:    Kent Overstreet, 2018-11-19 01:16:07 -05:00
Committer: Kent Overstreet
Parent:    90541a741d
Commit:    8eb7f3ee46
6 changed files with 49 additions and 23 deletions
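
In short: the per-device buckets_dirty bitmap is replaced by a dirty:1 bit inside struct bucket_mark, the old nouse:1 mark bit moves out into a buckets_nouse bitmap, and a new bucket_set_dirty() helper flips the dirty bit via bucket_cmpxchg(). Because the whole mark is packed into one machine word, the dirty flag can be set and cleared locklessly together with the rest of the mark. Below is a minimal standalone sketch of that compare-exchange pattern, using C11 atomics instead of the kernel's atomic64 helpers; the names and layout here are illustrative only, not taken from the tree.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative stand-in for the packed bucket_mark: bitfields overlaid on a
 * single 64-bit word, so the whole mark can be swapped atomically.
 */
union mark {
	uint64_t v;
	struct {
		uint8_t  gen;
		uint8_t  data_type:3,
			 owned_by_allocator:1,
			 dirty:1,
			 journal_seq_valid:1,
			 stripe:1;
		uint16_t dirty_sectors;
		uint16_t cached_sectors;
	};
};

struct bucket {
	_Atomic uint64_t mark;
};

/*
 * Roughly what bucket_cmpxchg(g, m, m.dirty = true) does: read the old mark,
 * apply the edit to a copy, and retry the compare-exchange until no other
 * CPU has modified the mark in between.  Returns the old mark.
 */
static union mark mark_set_dirty(struct bucket *g)
{
	union mark old, new;

	old.v = atomic_load(&g->mark);
	do {
		new = old;
		new.dirty = true;
	} while (!atomic_compare_exchange_weak(&g->mark, &old.v, new.v));

	return old;
}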

View file

@@ -185,9 +185,9 @@ static void __alloc_read_key(struct bucket *g, const struct bch_alloc *a)
 	g->_mark.cached_sectors = get_alloc_field(a, &d, idx++);
 }
 
-static void __alloc_write_key(struct bkey_i_alloc *a, struct bucket *g)
+static void __alloc_write_key(struct bkey_i_alloc *a, struct bucket *g,
+			      struct bucket_mark m)
 {
-	struct bucket_mark m = READ_ONCE(g->mark);
 	unsigned idx = 0;
 	void *d = a->v.data;
@@ -280,6 +280,8 @@ static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
 	__BKEY_PADDED(k, 8) alloc_key;
 #endif
 	struct bkey_i_alloc *a = bkey_alloc_init(&alloc_key.k);
+	struct bucket *g;
+	struct bucket_mark m;
 	int ret;
 
 	BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
@@ -287,7 +289,10 @@ static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
 	a->k.p = POS(ca->dev_idx, b);
 
 	percpu_down_read(&c->usage_lock);
-	__alloc_write_key(a, bucket(ca, b));
+	g = bucket(ca, b);
+	m = bucket_cmpxchg(g, m, m.dirty = false);
+
+	__alloc_write_key(a, g, m);
 	percpu_up_read(&c->usage_lock);
 
 	bch2_btree_iter_cond_resched(iter);
@@ -350,19 +355,24 @@ int bch2_alloc_write(struct bch_fs *c)
 	for_each_rw_member(ca, c, i) {
 		struct btree_iter iter;
-		unsigned long bucket;
+		struct bucket_array *buckets;
+		size_t b;
 
 		bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
 				     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
 		down_read(&ca->bucket_lock);
-		for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) {
-			ret = __bch2_alloc_write_key(c, ca, bucket,
-						     &iter, NULL, 0);
+		buckets = bucket_array(ca);
+
+		for (b = buckets->first_bucket;
+		     b < buckets->nbuckets;
+		     b++) {
+			if (!buckets->b[b].mark.dirty)
+				continue;
+
+			ret = __bch2_alloc_write_key(c, ca, b, &iter, NULL, 0);
 			if (ret)
 				break;
-
-			clear_bit(bucket, ca->buckets_dirty);
 		}
 		up_read(&ca->bucket_lock);
 		bch2_btree_iter_unlock(&iter);
@@ -541,6 +551,10 @@ static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
 	if (!is_available_bucket(mark))
 		return false;
 
+	if (ca->buckets_nouse &&
+	    test_bit(bucket, ca->buckets_nouse))
+		return false;
+
 	gc_gen = bucket_gc_gen(ca, bucket);
 
 	if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
@@ -1340,6 +1354,7 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
 			m = READ_ONCE(buckets->b[bu].mark);
 
 			if (!buckets->b[bu].gen_valid ||
+			    !test_bit(bu, ca->buckets_nouse) ||
 			    !is_available_bucket(m) ||
 			    m.cached_sectors)
 				continue;
@@ -1378,7 +1393,7 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
 			bch2_invalidate_one_bucket(c, ca, bu, &journal_seq);
 
 			fifo_push(&ca->free[RESERVE_BTREE], bu);
-			set_bit(bu, ca->buckets_dirty);
+			bucket_set_dirty(ca, bu);
 		}
 	}

View file

@@ -395,7 +395,7 @@ struct bch_dev {
 	 * Or rcu_read_lock(), but only for ptr_stale():
 	 */
 	struct bucket_array __rcu *buckets[2];
-	unsigned long		*buckets_dirty;
+	unsigned long		*buckets_nouse;
 	unsigned long		*buckets_written;
 	/* most out of date gen in the btree */
 	u8			*oldest_gens;

View file

@@ -150,7 +150,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
 				k.k->type, ptr->gen)) {
 			g->_mark.gen = ptr->gen;
 			g->gen_valid = 1;
-			set_bit(b, ca->buckets_dirty);
+			bucket_set_dirty(ca, b);
 		}
 
 		if (mustfix_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
@@ -158,7 +158,7 @@ static int bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
 				k.k->type, ptr->gen, g->mark.gen)) {
 			g->_mark.gen = ptr->gen;
 			g->gen_valid = 1;
-			set_bit(b, ca->buckets_dirty);
+			bucket_set_dirty(ca, b);
 			set_bit(BCH_FS_FIXED_GENS, &c->flags);
 		}
 	}

View file

@@ -1132,7 +1132,7 @@ static void buckets_free_rcu(struct rcu_head *rcu)
 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 {
 	struct bucket_array *buckets = NULL, *old_buckets = NULL;
-	unsigned long *buckets_dirty = NULL;
+	unsigned long *buckets_nouse = NULL;
 	unsigned long *buckets_written = NULL;
 	u8 *oldest_gens = NULL;
 	alloc_fifo	free[RESERVE_NR];
@@ -1162,7 +1162,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 					    GFP_KERNEL|__GFP_ZERO)) ||
 	    !(oldest_gens	= kvpmalloc(nbuckets * sizeof(u8),
 					    GFP_KERNEL|__GFP_ZERO)) ||
-	    !(buckets_dirty	= kvpmalloc(BITS_TO_LONGS(nbuckets) *
+	    !(buckets_nouse	= kvpmalloc(BITS_TO_LONGS(nbuckets) *
 					    sizeof(unsigned long),
 					    GFP_KERNEL|__GFP_ZERO)) ||
 	    !(buckets_written	= kvpmalloc(BITS_TO_LONGS(nbuckets) *
@@ -1199,8 +1199,8 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 		memcpy(oldest_gens,
 		       ca->oldest_gens,
 		       n * sizeof(u8));
-		memcpy(buckets_dirty,
-		       ca->buckets_dirty,
+		memcpy(buckets_nouse,
+		       ca->buckets_nouse,
 		       BITS_TO_LONGS(n) * sizeof(unsigned long));
 		memcpy(buckets_written,
 		       ca->buckets_written,
@@ -1211,7 +1211,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	buckets = old_buckets;
 
 	swap(ca->oldest_gens, oldest_gens);
-	swap(ca->buckets_dirty, buckets_dirty);
+	swap(ca->buckets_nouse, buckets_nouse);
 	swap(ca->buckets_written, buckets_written);
 
 	if (resize)
@@ -1250,7 +1250,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	free_fifo(&free_inc);
 	for (i = 0; i < RESERVE_NR; i++)
 		free_fifo(&free[i]);
-	kvpfree(buckets_dirty,
+	kvpfree(buckets_nouse,
 		BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
 	kvpfree(buckets_written,
 		BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
@@ -1273,7 +1273,7 @@ void bch2_dev_buckets_free(struct bch_dev *ca)
 		free_fifo(&ca->free[i]);
 	kvpfree(ca->buckets_written,
 		BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
-	kvpfree(ca->buckets_dirty,
+	kvpfree(ca->buckets_nouse,
 		BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
 	kvpfree(ca->oldest_gens, ca->mi.nbuckets * sizeof(u8));
 	kvpfree(rcu_dereference_protected(ca->buckets[0], 1),
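
Side note on the allocation sizes above: BITS_TO_LONGS(nbuckets) * sizeof(unsigned long) is one bit per bucket, rounded up to a whole number of longs. A small standalone equivalent, assuming the usual kernel definition of BITS_TO_LONGS(); the macro and helper names here are made up for illustration:

#include <limits.h>
#include <stddef.h>

/* One bit per bucket, rounded up to whole unsigned longs -- the same
 * arithmetic as the kernel's BITS_TO_LONGS(). */
#define BITS_PER_LONG_EX	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS_EX(n)	(((n) + BITS_PER_LONG_EX - 1) / BITS_PER_LONG_EX)

/* e.g. 1000 buckets on a 64-bit machine -> 16 longs -> 128 bytes, the size
 * passed to kvpmalloc()/kvpfree() for the buckets_nouse bitmap above. */
static size_t bucket_bitmap_bytes(size_t nbuckets)
{
	return BITS_TO_LONGS_EX(nbuckets) * sizeof(unsigned long);
}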

View file

@@ -57,6 +57,18 @@ static inline struct bucket *bucket(struct bch_dev *ca, size_t b)
 	return __bucket(ca, b, false);
 }
 
+static inline void bucket_set_dirty(struct bch_dev *ca, size_t b)
+{
+	struct bucket *g;
+	struct bucket_mark m;
+
+	rcu_read_lock();
+	g = bucket(ca, b);
+	bucket_cmpxchg(g, m, m.dirty = true);
+	rcu_read_unlock();
+}
+
 static inline void bucket_io_clock_reset(struct bch_fs *c, struct bch_dev *ca,
 					 size_t b, int rw)
 {
@@ -196,8 +208,7 @@ static inline bool is_available_bucket(struct bucket_mark mark)
 {
 	return (!mark.owned_by_allocator &&
 		!mark.dirty_sectors &&
-		!mark.stripe &&
-		!mark.nouse);
+		!mark.stripe);
 }
 
 static inline bool bucket_needs_journal_commit(struct bucket_mark m,

View file

@@ -15,7 +15,7 @@ struct bucket_mark {
 			u8		gen;
 			u8		data_type:3,
 					owned_by_allocator:1,
-					nouse:1,
+					dirty:1,
 					journal_seq_valid:1,
 					stripe:1;
 			u16		dirty_sectors;