bcachefs: Add accounting for dirty btree nodes/keys

This lets us improve journal reclaim, so that it now tries to make sure
no more than 3/4s of the btree node cache and btree key cache are dirty
- ensuring the shrinkers can free memory.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet authored 2020-11-09 13:01:52 -05:00, committed by Kent Overstreet
parent 01819cfe37
commit 6a747c4683
8 changed files with 70 additions and 16 deletions
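
For context on the numbers in the journal_reclaim.c hunk below: "no more than 3/4s dirty" is tested with integer math, since dirty * 4 > total * 3 is the same comparison as dirty > (3/4) * total but needs no division. A minimal sketch of that check in isolation (illustrative only; the helper name is made up and not part of the patch):

	#include <stdbool.h>
	#include <stddef.h>

	/* true when more than three quarters of the entries are dirty */
	static bool mostly_dirty(size_t nr_dirty, size_t nr_total)
	{
		return nr_dirty * 4 > nr_total * 3;
	}

When either the btree node cache or the btree key cache crosses that threshold, bch2_journal_reclaim() sets min_nr to 1, so journal_flush_pins() flushes at least one journal pin, dirty nodes/keys get written back, and the shrinkers can then free them.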

fs/bcachefs/btree_cache.c

@@ -382,11 +382,13 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 		if (btree_node_dirty(b))
 			bch2_btree_complete_write(c, b, btree_current_write(b));
-		clear_btree_node_dirty(b);
+		clear_btree_node_dirty(c, b);
 
 		btree_node_data_free(c, b);
 	}
 
+	BUG_ON(atomic_read(&c->btree_cache.dirty));
+
 	while (!list_empty(&bc->freed)) {
 		b = list_first_entry(&bc->freed, struct btree, list);
 		list_del(&b->list);

fs/bcachefs/btree_io.c

@@ -1498,6 +1498,8 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
 		new ^= (1 << BTREE_NODE_write_idx);
 	} while (cmpxchg_acquire(&b->flags, old, new) != old);
 
+	atomic_dec(&c->btree_cache.dirty);
+
 	BUG_ON(btree_node_fake(b));
 	BUG_ON((b->will_make_reachable != 0) != !b->written);

fs/bcachefs/btree_io.h

@@ -14,6 +14,23 @@ struct btree_write;
 struct btree;
 struct btree_iter;
 
+static inline bool btree_node_dirty(struct btree *b)
+{
+	return test_bit(BTREE_NODE_dirty, &b->flags);
+}
+
+static inline void set_btree_node_dirty(struct bch_fs *c, struct btree *b)
+{
+	if (!test_and_set_bit(BTREE_NODE_dirty, &b->flags))
+		atomic_inc(&c->btree_cache.dirty);
+}
+
+static inline void clear_btree_node_dirty(struct bch_fs *c, struct btree *b)
+{
+	if (test_and_clear_bit(BTREE_NODE_dirty, &b->flags))
+		atomic_dec(&c->btree_cache.dirty);
+}
+
 struct btree_read_bio {
 	struct bch_fs *c;
 	u64 start_time;
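
The helpers above only move c->btree_cache.dirty when the node's dirty bit actually changes state, so redundant set/clear calls cannot skew the counter. A rough userspace model of that pattern (illustrative only, not kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct node  { atomic_bool dirty; };
	struct cache { atomic_int  nr_dirty; };

	static void mark_dirty(struct cache *c, struct node *n)
	{
		/* like test_and_set_bit(): count only the clean -> dirty transition */
		if (!atomic_exchange(&n->dirty, true))
			atomic_fetch_add(&c->nr_dirty, 1);
	}

	static void mark_clean(struct cache *c, struct node *n)
	{
		/* like test_and_clear_bit(): count only the dirty -> clean transition */
		if (atomic_exchange(&n->dirty, false))
			atomic_fetch_sub(&c->nr_dirty, 1);
	}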

fs/bcachefs/btree_key_cache.c

@@ -65,6 +65,8 @@ static void bkey_cached_evict(struct btree_key_cache *c,
 	BUG_ON(rhashtable_remove_fast(&c->table, &ck->hash,
 			bch2_btree_key_cache_params));
 	memset(&ck->key, ~0, sizeof(ck->key));
+
+	c->nr_keys--;
 }
 
 static void bkey_cached_free(struct btree_key_cache *c,
@@ -135,6 +137,8 @@ btree_key_cache_create(struct btree_key_cache *c,
 		return NULL;
 	}
 
+	c->nr_keys++;
+
 	list_move(&ck->list, &c->clean);
 	six_unlock_write(&ck->c.lock);
@@ -355,10 +359,14 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
 	bch2_journal_pin_drop(j, &ck->journal);
 	bch2_journal_preres_put(j, &ck->res);
-	clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
 
 	if (!evict) {
 		mutex_lock(&c->btree_key_cache.lock);
+		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
+			c->btree_key_cache.nr_dirty--;
+		}
+
 		list_move_tail(&ck->list, &c->btree_key_cache.clean);
 		mutex_unlock(&c->btree_key_cache.lock);
 	} else {
@@ -371,6 +379,11 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
 		six_lock_write(&ck->c.lock, NULL, NULL);
 		mutex_lock(&c->btree_key_cache.lock);
 
+		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
+			c->btree_key_cache.nr_dirty--;
+		}
+
 		bkey_cached_evict(&c->btree_key_cache, ck);
 		bkey_cached_free(&c->btree_key_cache, ck);
 		mutex_unlock(&c->btree_key_cache.lock);
@@ -448,9 +461,10 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans,
 	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
 		mutex_lock(&c->btree_key_cache.lock);
-		list_del_init(&ck->list);
+		list_move(&ck->list, &c->btree_key_cache.dirty);
 		set_bit(BKEY_CACHED_DIRTY, &ck->flags);
+		c->btree_key_cache.nr_dirty++;
 		mutex_unlock(&c->btree_key_cache.lock);
 	}
@@ -467,20 +481,28 @@ void bch2_btree_key_cache_verify_clean(struct btree_trans *trans,
 }
 #endif
 
-void bch2_fs_btree_key_cache_exit(struct btree_key_cache *c)
+void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
 {
+	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
 	struct bkey_cached *ck, *n;
 
-	mutex_lock(&c->lock);
-	list_for_each_entry_safe(ck, n, &c->clean, list) {
+	mutex_lock(&bc->lock);
+	list_splice(&bc->dirty, &bc->clean);
+
+	list_for_each_entry_safe(ck, n, &bc->clean, list) {
 		kfree(ck->k);
 		kfree(ck);
+		bc->nr_keys--;
 	}
-	list_for_each_entry_safe(ck, n, &c->freed, list)
-		kfree(ck);
-	mutex_unlock(&c->lock);
-
-	rhashtable_destroy(&c->table);
+
+	BUG_ON(bc->nr_dirty && !bch2_journal_error(&c->journal));
+	BUG_ON(bc->nr_keys);
+
+	list_for_each_entry_safe(ck, n, &bc->freed, list)
+		kfree(ck);
+	mutex_unlock(&bc->lock);
+
+	rhashtable_destroy(&bc->table);
 }
 
 void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
@@ -488,6 +510,7 @@ void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
 	mutex_init(&c->lock);
 	INIT_LIST_HEAD(&c->freed);
 	INIT_LIST_HEAD(&c->clean);
+	INIT_LIST_HEAD(&c->dirty);
 }
 
 int bch2_fs_btree_key_cache_init(struct btree_key_cache *c)

fs/bcachefs/btree_types.h

@@ -158,6 +158,7 @@ struct btree_cache {
 	/* Number of elements in live + freeable lists */
 	unsigned used;
 	unsigned reserve;
+	atomic_t dirty;
 	struct shrinker shrink;
 
 	/*
@@ -294,6 +295,10 @@ struct btree_key_cache {
 	struct rhashtable table;
 	struct list_head freed;
 	struct list_head clean;
+	struct list_head dirty;
+
+	size_t nr_keys;
+	size_t nr_dirty;
 };
 
 struct bkey_cached_key {
@@ -411,7 +416,6 @@ enum btree_flags {
 BTREE_FLAG(read_in_flight);
 BTREE_FLAG(read_error);
-BTREE_FLAG(dirty);
 BTREE_FLAG(need_write);
 BTREE_FLAG(noevict);
 BTREE_FLAG(write_idx);

fs/bcachefs/btree_update_interior.c

@@ -149,7 +149,7 @@ void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
 	b->ob.nr = 0;
 
-	clear_btree_node_dirty(b);
+	clear_btree_node_dirty(c, b);
 
 	btree_node_lock_type(c, b, SIX_LOCK_write);
 	__btree_node_free(c, b);
@@ -264,7 +264,7 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned lev
 	b = as->prealloc_nodes[--as->nr_prealloc_nodes];
 
 	set_btree_node_accessed(b);
-	set_btree_node_dirty(b);
+	set_btree_node_dirty(c, b);
 	set_btree_node_need_write(b);
 
 	bch2_bset_init_first(b, &b->data->keys);
@@ -827,7 +827,7 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *as,
 		closure_wake_up(&c->btree_interior_update_wait);
 	}
 
-	clear_btree_node_dirty(b);
+	clear_btree_node_dirty(c, b);
 	clear_btree_node_need_write(b);
 
 	/*
@@ -1034,7 +1034,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
 		bch2_btree_node_iter_advance(node_iter, b);
 
 	bch2_btree_bset_insert_key(iter, b, node_iter, insert);
-	set_btree_node_dirty(b);
+	set_btree_node_dirty(as->c, b);
 	set_btree_node_need_write(b);
 }

fs/bcachefs/btree_update_leaf.c

@@ -191,7 +191,7 @@ static bool btree_insert_key_leaf(struct btree_trans *trans,
 		bch2_btree_add_journal_pin(c, b, trans->journal_res.seq);
 
 	if (unlikely(!btree_node_dirty(b)))
-		set_btree_node_dirty(b);
+		set_btree_node_dirty(c, b);
 
 	live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
 	u64s_added = (int) bset_u64s(t) - old_u64s;

fs/bcachefs/journal_reclaim.c

@@ -547,6 +547,12 @@ void bch2_journal_reclaim(struct journal *j)
 		if (j->prereserved.reserved * 2 > j->prereserved.remaining)
 			min_nr = 1;
 
+		if ((atomic_read(&c->btree_cache.dirty) * 4 >
+		     c->btree_cache.used * 3) ||
+		    (c->btree_key_cache.nr_dirty * 4 >
+		     c->btree_key_cache.nr_keys))
+			min_nr = 1;
 	} while (journal_flush_pins(j, seq_to_flush, min_nr));
 
 	if (!bch2_journal_error(j))