bcachefs: Don't set accessed bit on btree node fill

Btree nodes shouldn't have their accessed bit set when they enter the
btree cache by being read in from disk - this fixes linear scans
thrashing the cache.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

Author: Kent Overstreet 2022-11-25 16:04:42 -05:00
parent b6804b6103
commit 447e92274a

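Background for the diff below: the btree node cache reclaims memory with a
second-chance policy - the shrinker spares a node whose accessed bit is set
(clearing the bit) and only evicts nodes that have stayed cold since the
last pass. A minimal standalone sketch of that policy, with hypothetical
names (this is not the actual bcachefs shrinker):

#include <stdbool.h>

struct node {
        bool            accessed;
        struct node     *next;
};

/*
 * Second-chance scan: a node found with the accessed bit set is spared
 * once; only nodes still cold on the next pass get evicted.
 */
static void cache_scan(struct node *lru, void (*evict)(struct node *))
{
        struct node *n, *next;

        for (n = lru; n; n = next) {
                next = n->next;         /* evict() may free n */

                if (n->accessed) {
                        /* recently used: spare it once, clear the hint */
                        n->accessed = false;
                        continue;
                }
                evict(n);               /* cold since the last pass */
        }
}

Under this policy, a node that enters the cache with the accessed bit
already set survives a full scan pass before it can be reclaimed - so a
long linear scan that fills the cache with such nodes can push out the
genuinely hot working set. That is the thrashing this commit avoids.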

fs/bcachefs/btree_cache.c:

@@ -707,6 +707,12 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	if (IS_ERR(b))
 		return b;
 
+	/*
+	 * Btree nodes read in from disk should not have the accessed bit set
+	 * initially, so that linear scans don't thrash the cache:
+	 */
+	clear_btree_node_accessed(b);
+
 	bkey_copy(&b->key, k);
 	if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) {
 		/* raced with another fill: */
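For reference, the *_btree_node_accessed() helpers used above are generated
flag accessors over b->flags. Roughly, following the BTREE_FLAG() pattern
bcachefs uses - sketched from memory, the definitions in the tree at this
commit may differ in detail:

#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree *b)		\
{									\
	return test_bit(BTREE_NODE_ ## flag, &b->flags);		\
}									\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{									\
	set_bit(BTREE_NODE_ ## flag, &b->flags);			\
}									\
									\
static inline void clear_btree_node_ ## flag(struct btree *b)		\
{									\
	clear_bit(BTREE_NODE_ ## flag, &b->flags);			\
}

BTREE_FLAG(accessed);

set_bit() and clear_bit() are atomic bitops; test_bit() is a plain read.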
@@ -843,6 +849,10 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr
 			trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
 			return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
 		}
+
+		/* avoid atomic set bit if it's not needed: */
+		if (!btree_node_accessed(b))
+			set_btree_node_accessed(b);
 	}
 
 	if (unlikely(btree_node_read_in_flight(b))) {
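This hunk moves the accessed-bit update under the cache-hit branch (taken
when the node was found in the hash table), so only lookups that actually
hit mark the node hot. The test-before-set shape is the usual idiom for
cheaply setting a flag: set_bit() is an atomic read-modify-write that takes
the cacheline exclusive even when the bit is already set, while test_bit()
is a plain read. In general form (generic kernel idiom, nothing
bcachefs-specific; FLAG and flags are placeholders):

	if (!test_bit(FLAG, &flags))	/* cheap shared read */
		set_bit(FLAG, &flags);	/* atomic RMW only when needed */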
@@ -880,10 +890,6 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr
 		prefetch(p + L1_CACHE_BYTES * 2);
 	}
 
-	/* avoid atomic set bit if it's not needed: */
-	if (!btree_node_accessed(b))
-		set_btree_node_accessed(b);
-
 	if (unlikely(btree_node_read_error(b))) {
 		six_unlock_type(&b->c.lock, lock_type);
 		return ERR_PTR(-EIO);
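The block removed here ran on every path out of __bch2_btree_node_get(),
including immediately after a fresh bch2_btree_node_fill() - which would
have re-set the accessed bit the fill path just cleared. Restricting it to
the cache-hit branch (previous hunk) keeps the optimization for hits while
letting freshly filled nodes stay cold until they are actually reused.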