bcachefs: introduce b->hash_val

This is partly prep work for introducing bch_btree_ptr_v2, but it'll
also be a bit of a performance boost by moving the full key out of the
hot part of struct btree.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet authored 2020-02-18 17:15:32 -05:00, committed by Kent Overstreet
parent b606c8aa56
commit 237e80483a
7 changed files with 52 additions and 44 deletions
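
The idea, in brief: rather than keying the btree node hash table off the bkey embedded in struct btree and comparing it on every lookup, each node now caches a u64 derived from its key (btree_ptr_hash_val()) in a new hash_val field; the rhashtable keys off that field, and hash_val == 0 doubles as the "not hashed" sentinel. What follows is a minimal standalone sketch of that pattern in plain C - everything except the hash_val idea is a made-up stand-in, and the kernel rhashtable is replaced by a direct comparison:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the node's full, padded key - cold data we don't want
 * in the hot part of the struct. */
struct big_key {
        uint64_t ptr;      /* the part btree_ptr_hash_val() extracts */
        char     pad[64];  /* the rest of the key */
};

struct node {
        uint64_t hash_val; /* cached hash key; 0 means "not in the table" */
        /* hot fields (locks, flags, ...) would live here */
        struct big_key key; /* full key moved to the cold tail */
};

/* Mirrors btree_ptr_hash_val(): derive the 64-bit table key from the key. */
static uint64_t node_hash_val(const struct big_key *k)
{
        return k->ptr;
}

/* Mirrors bch2_btree_cache_cmp_fn(): compare the cached value only,
 * never the full key. */
static int node_cmp(const struct node *n, uint64_t v)
{
        return n->hash_val == v ? 0 : 1;
}

int main(void)
{
        struct node n = { 0 };

        n.key.ptr = 0x1234;
        n.hash_val = node_hash_val(&n.key);     /* hash insert */

        uint64_t v = node_hash_val(&n.key);     /* lookup */
        printf("lookup: %s\n", !node_cmp(&n, v) ? "hit" : "miss");

        n.hash_val = 0;                         /* eviction */
        printf("after evict: %s\n", !node_cmp(&n, v) ? "hit" : "miss");
        return 0;
}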

fs/bcachefs/btree_cache.c

@@ -62,13 +62,13 @@ static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg,
                                    const void *obj)
 {
         const struct btree *b = obj;
         const u64 *v = arg->key;
 
-        return PTR_HASH(&b->key) == *v ? 0 : 1;
+        return b->hash_val == *v ? 0 : 1;
 }
 
 static const struct rhashtable_params bch_btree_cache_params = {
         .head_offset    = offsetof(struct btree, hash),
-        .key_offset     = offsetof(struct btree, key.v),
-        .key_len        = sizeof(struct bch_extent_ptr),
+        .key_offset     = offsetof(struct btree, hash_val),
+        .key_len        = sizeof(u64),
         .obj_cmpfn      = bch2_btree_cache_cmp_fn,
 };
@@ -115,11 +115,14 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
         rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
 
         /* Cause future lookups for this node to fail: */
-        PTR_HASH(&b->key) = 0;
+        b->hash_val = 0;
 }
 
 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
 {
+        BUG_ON(b->hash_val);
+        b->hash_val = btree_ptr_hash_val(&b->key);
+
         return rhashtable_lookup_insert_fast(&bc->table, &b->hash,
                                              bch_btree_cache_params);
 }
@@ -145,8 +148,9 @@ __flatten
 static inline struct btree *btree_cache_find(struct btree_cache *bc,
                                              const struct bkey_i *k)
 {
-        return rhashtable_lookup_fast(&bc->table, &PTR_HASH(k),
-                                      bch_btree_cache_params);
+        u64 v = btree_ptr_hash_val(k);
+
+        return rhashtable_lookup_fast(&bc->table, &v, bch_btree_cache_params);
 }
 
 /*
@@ -200,7 +204,7 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
                 btree_node_wait_on_io(b);
         }
 out:
-        if (PTR_HASH(&b->key) && !ret)
+        if (b->hash_val && !ret)
                 trace_btree_node_reap(c, b);
         return ret;
 out_unlock:
@@ -608,7 +612,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
                 /* raced with another fill: */
 
                 /* mark as unhashed... */
-                PTR_HASH(&b->key) = 0;
+                b->hash_val = 0;
 
                 mutex_lock(&bc->lock);
                 list_add(&b->list, &bc->freeable);
@@ -711,7 +715,7 @@ struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
          * free it:
          *
          * To guard against this, btree nodes are evicted from the cache
-         * when they're freed - and PTR_HASH() is zeroed out, which we
+         * when they're freed - and b->hash_val is zeroed out, which we
          * check for after we lock the node.
          *
          * Then, bch2_btree_node_relock() on the parent will fail - because
@@ -724,7 +728,7 @@ struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
                 if (!btree_node_lock(b, k->k.p, level, iter, lock_type))
                         return ERR_PTR(-EINTR);
 
-                if (unlikely(PTR_HASH(&b->key) != PTR_HASH(k) ||
+                if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
                              b->c.level != level ||
                              race_fault())) {
                         six_unlock_type(&b->c.lock, lock_type);

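The comment in the hunk above documents the guard that carries over to hash_val: taking the node lock can block, and the node may be freed (and its memory reused for a different node) in the meantime, so the cached value is re-checked once the lock is held; a mismatch means "retry the lookup". A standalone sketch of that check-after-lock pattern - all names here are hypothetical stand-ins, and the race is simulated single-threaded:

#include <stdint.h>
#include <stdio.h>

struct node {
        uint64_t hash_val;  /* zeroed on eviction, as in bch2_btree_node_hash_remove() */
        int      locked;    /* stand-in for the real node lock */
};

static void node_lock(struct node *n)   { n->locked = 1; }
static void node_unlock(struct node *n) { n->locked = 0; }

/* Lock the node, then re-validate the cached hash value; returns NULL
 * if the node was evicted or reused while we waited, so the caller retries. */
static struct node *validate_after_lock(struct node *n, uint64_t want)
{
        node_lock(n);
        if (n->hash_val != want) {
                node_unlock(n);
                return NULL;
        }
        return n;
}

int main(void)
{
        struct node n = { .hash_val = 42 };

        /* normal case: hash_val still matches once we hold the lock */
        printf("%s\n", validate_after_lock(&n, 42) ? "ok" : "retry");
        node_unlock(&n);

        /* raced case: node evicted (hash_val zeroed) before we got the lock */
        n.hash_val = 0;
        printf("%s\n", validate_after_lock(&n, 42) ? "ok" : "retry");
        return 0;
}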
fs/bcachefs/btree_cache.h

@@ -35,13 +35,20 @@ void bch2_fs_btree_cache_exit(struct bch_fs *);
 int bch2_fs_btree_cache_init(struct bch_fs *);
 void bch2_fs_btree_cache_init_early(struct btree_cache *);
 
-#define PTR_HASH(_k)    *((u64 *) &bkey_i_to_btree_ptr_c(_k)->v)
+static inline u64 btree_ptr_hash_val(const struct bkey_i *k)
+{
+        switch (k->k.type) {
+        case KEY_TYPE_btree_ptr:
+                return *((u64 *) bkey_i_to_btree_ptr_c(k)->v.start);
+        default:
+                return 0;
+        }
+}
 
 /* is btree node in hash table? */
 static inline bool btree_node_hashed(struct btree *b)
 {
-        return b->key.k.type == KEY_TYPE_btree_ptr &&
-                PTR_HASH(&b->key);
+        return b->hash_val != 0;
 }
 
 #define for_each_cached_btree(_b, _c, _tbl, _iter, _pos) \

fs/bcachefs/btree_io.c

@@ -1254,8 +1254,6 @@ static void bch2_btree_node_write_error(struct bch_fs *c,
 {
         struct btree *b = wbio->wbio.bio.bi_private;
         __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
-        struct bkey_i_btree_ptr *new_key;
-        struct bkey_s_btree_ptr bp;
         struct bch_extent_ptr *ptr;
         struct btree_trans trans;
         struct btree_iter *iter;
@@ -1281,16 +1279,13 @@ static void bch2_btree_node_write_error(struct bch_fs *c,
 
         bkey_copy(&tmp.k, &b->key);
 
-        new_key = bkey_i_to_btree_ptr(&tmp.k);
-        bp = btree_ptr_i_to_s(new_key);
-
         bch2_bkey_drop_ptrs(bkey_i_to_s(&tmp.k), ptr,
                 bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
 
-        if (!bch2_bkey_nr_ptrs(bp.s_c))
+        if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&tmp.k)))
                 goto err;
 
-        ret = bch2_btree_node_update_key(c, iter, b, new_key);
+        ret = bch2_btree_node_update_key(c, iter, b, &tmp.k);
         if (ret == -EINTR)
                 goto retry;
         if (ret)

fs/bcachefs/btree_types.h

@@ -71,9 +71,7 @@ struct btree {
         struct btree_bkey_cached_common c;
 
         struct rhash_head       hash;
-
-        /* Key/pointer for this btree node */
-        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
+        u64                     hash_val;
 
         unsigned long           flags;
         u16                     written;
@@ -136,6 +134,9 @@ struct btree {
 #ifdef CONFIG_BCACHEFS_DEBUG
         bool *expensive_debug_checks;
 #endif
+
+        /* Key/pointer for this btree node */
+        __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
 };
 
 struct btree_cache {
struct btree_cache {

fs/bcachefs/btree_update.h

@@ -70,7 +70,7 @@ int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
 int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *,
                             __le64, unsigned);
 int bch2_btree_node_update_key(struct bch_fs *, struct btree_iter *,
-                               struct btree *, struct bkey_i_btree_ptr *);
+                               struct btree *, struct bkey_i *);
 
 int bch2_trans_update(struct btree_trans *, struct btree_iter *,
                       struct bkey_i *, enum btree_trigger_flags);

fs/bcachefs/btree_update_interior.c

@@ -1944,7 +1944,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
                                          struct btree_update *as,
                                          struct btree_iter *iter,
                                          struct btree *b, struct btree *new_hash,
-                                         struct bkey_i_btree_ptr *new_key)
+                                         struct bkey_i *new_key)
 {
         struct btree *parent;
         int ret;
@@ -1989,20 +1989,20 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
          */
         ret = bch2_disk_reservation_add(c, &as->reserve->disk_res,
                         c->opts.btree_node_size *
-                        bch2_bkey_nr_ptrs(bkey_i_to_s_c(&new_key->k_i)),
+                        bch2_bkey_nr_ptrs(bkey_i_to_s_c(new_key)),
                         BCH_DISK_RESERVATION_NOFAIL);
         BUG_ON(ret);
 
         parent = btree_node_parent(iter, b);
         if (parent) {
                 if (new_hash) {
-                        bkey_copy(&new_hash->key, &new_key->k_i);
+                        bkey_copy(&new_hash->key, new_key);
                         ret = bch2_btree_node_hash_insert(&c->btree_cache,
                                         new_hash, b->c.level, b->c.btree_id);
                         BUG_ON(ret);
                 }
 
-                bch2_keylist_add(&as->parent_keys, &new_key->k_i);
+                bch2_keylist_add(&as->parent_keys, new_key);
                 bch2_btree_insert_node(as, parent, iter, &as->parent_keys, 0);
 
                 if (new_hash) {
@@ -2011,12 +2011,12 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
                         bch2_btree_node_hash_remove(&c->btree_cache, b);
 
-                        bkey_copy(&b->key, &new_key->k_i);
+                        bkey_copy(&b->key, new_key);
                         ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
                         BUG_ON(ret);
                         mutex_unlock(&c->btree_cache.lock);
                 } else {
-                        bkey_copy(&b->key, &new_key->k_i);
+                        bkey_copy(&b->key, new_key);
                 }
         } else {
                 struct bch_fs_usage_online *fs_usage;
@@ -2029,11 +2029,11 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
                 percpu_down_read(&c->mark_lock);
                 fs_usage = bch2_fs_usage_scratch_get(c);
 
-                bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
+                bch2_mark_key_locked(c, bkey_i_to_s_c(new_key),
                               0, 0, &fs_usage->u, 0,
                               BTREE_TRIGGER_INSERT);
                 if (gc_visited(c, gc_pos_btree_root(b->c.btree_id)))
-                        bch2_mark_key_locked(c, bkey_i_to_s_c(&new_key->k_i),
+                        bch2_mark_key_locked(c, bkey_i_to_s_c(new_key),
                                              0, 0, NULL, 0,
                                              BTREE_TRIGGER_INSERT|
                                              BTREE_TRIGGER_GC);
@@ -2047,16 +2047,16 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
                 percpu_up_read(&c->mark_lock);
                 mutex_unlock(&c->btree_interior_update_lock);
 
-                if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
+                if (btree_ptr_hash_val(new_key) != b->hash_val) {
                         mutex_lock(&c->btree_cache.lock);
                         bch2_btree_node_hash_remove(&c->btree_cache, b);
 
-                        bkey_copy(&b->key, &new_key->k_i);
+                        bkey_copy(&b->key, new_key);
                         ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
                         BUG_ON(ret);
                         mutex_unlock(&c->btree_cache.lock);
                 } else {
-                        bkey_copy(&b->key, &new_key->k_i);
+                        bkey_copy(&b->key, new_key);
                 }
 
                 btree_update_updated_root(as);
@@ -2068,7 +2068,7 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
 
 int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
                                struct btree *b,
-                               struct bkey_i_btree_ptr *new_key)
+                               struct bkey_i *new_key)
 {
         struct btree *parent = btree_node_parent(iter, b);
         struct btree_update *as = NULL;
@@ -2091,8 +2091,11 @@ int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
                 }
         }
 
-        /* check PTR_HASH() after @b is locked by btree_iter_traverse(): */
-        if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
+        /*
+         * check btree_ptr_hash_val() after @b is locked by
+         * btree_iter_traverse():
+         */
+        if (btree_ptr_hash_val(new_key) != b->hash_val) {
                 /* bch2_btree_reserve_get will unlock */
                 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
                 if (ret) {
@@ -2134,7 +2137,7 @@ int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
                 goto err;
         }
 
-        ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(&new_key->k_i));
+        ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(new_key));
         if (ret)
                 goto err_free_update;
@@ -2193,7 +2196,7 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
 
         bkey_btree_ptr_init(&b->key);
         b->key.k.p = POS_MAX;
-        PTR_HASH(&b->key) = U64_MAX - id;
+        *((u64 *) bkey_i_to_btree_ptr(&b->key)->v.start) = U64_MAX - id;
 
         bch2_bset_init_first(b, &b->data->keys);
         bch2_btree_build_aux_trees(b);
fs/bcachefs/migrate.c

@@ -123,23 +123,21 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
         for_each_btree_node(&trans, iter, id, POS_MIN,
                             BTREE_ITER_PREFETCH, b) {
                 __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
-                struct bkey_i_btree_ptr *new_key;
 retry:
                 if (!bch2_bkey_has_device(bkey_i_to_s_c(&b->key),
                                           dev_idx))
                         continue;
 
                 bkey_copy(&tmp.k, &b->key);
-                new_key = bkey_i_to_btree_ptr(&tmp.k);
 
-                ret = drop_dev_ptrs(c, bkey_i_to_s(&new_key->k_i),
+                ret = drop_dev_ptrs(c, bkey_i_to_s(&tmp.k),
                                     dev_idx, flags, true);
                 if (ret) {
                         bch_err(c, "Cannot drop device without losing data");
                         goto err;
                 }
 
-                ret = bch2_btree_node_update_key(c, iter, b, new_key);
+                ret = bch2_btree_node_update_key(c, iter, b, &tmp.k);
                 if (ret == -EINTR) {
                         b = bch2_btree_iter_peek_node(iter);
                         goto retry;