bcachefs: Switch btree locking code to struct btree_bkey_cached_common

This is just some type-safety cleanup.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
This commit is contained in:
Kent Overstreet 2022-08-22 13:21:10 -04:00 committed by Kent Overstreet
parent 616928c30f
commit 14599cce44
6 changed files with 28 additions and 23 deletions

View file

@ -886,7 +886,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
if (btree_node_read_locked(path, level + 1))
btree_node_unlock(trans, path, level + 1);
ret = btree_node_lock(trans, path, b, k->k.p, level, lock_type,
ret = btree_node_lock(trans, path, &b->c, k->k.p, level, lock_type,
lock_node_check_fn, (void *) k, trace_ip);
if (unlikely(ret)) {
if (bch2_err_matches(ret, BCH_ERR_lock_fail_node_reused))

View file

@ -787,7 +787,7 @@ static inline int btree_path_lock_root(struct btree_trans *trans,
}
lock_type = __btree_lock_want(path, path->level);
ret = btree_node_lock(trans, path, b, SPOS_MAX,
ret = btree_node_lock(trans, path, &b->c, SPOS_MAX,
path->level, lock_type,
lock_root_check_fn, rootp,
trace_ip);

View file

@ -18,7 +18,7 @@ static inline void six_lock_readers_add(struct six_lock *lock, int nr)
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
struct btree_path *skip,
struct btree *b,
struct btree_bkey_cached_common *b,
unsigned level)
{
struct btree_path *path;
@ -30,7 +30,7 @@ struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
return ret;
trans_for_each_path(trans, path)
if (path != skip && path->l[level].b == b) {
if (path != skip && &path->l[level].b->c == b) {
int t = btree_node_locked_type(path, level);
if (t != BTREE_NODE_UNLOCKED)
@ -52,7 +52,7 @@ void bch2_btree_node_unlock_write(struct btree_trans *trans,
void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
{
int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->c.level).n[SIX_LOCK_read];
int readers = bch2_btree_node_lock_counts(trans, NULL, &b->c, b->c.level).n[SIX_LOCK_read];
/*
* Must drop our read locks before calling six_lock_write() -
@ -78,7 +78,7 @@ static inline bool path_has_read_locks(struct btree_path *path)
/* Slowpath: */
int __bch2_btree_node_lock(struct btree_trans *trans,
struct btree_path *path,
struct btree *b,
struct btree_bkey_cached_common *b,
struct bpos pos, unsigned level,
enum six_lock_type type,
six_lock_should_sleep_fn should_sleep_fn, void *p,
@ -142,7 +142,7 @@ int __bch2_btree_node_lock(struct btree_trans *trans,
/* Must lock btree nodes in key order: */
if (btree_node_locked(linked, level) &&
bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
bpos_cmp(pos, btree_node_pos(&linked->l[level].b->c,
linked->cached)) <= 0) {
reason = 7;
goto deadlock;
@ -216,7 +216,7 @@ bool __bch2_btree_node_relock(struct btree_trans *trans,
if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
(btree_node_lock_seq_matches(path, b, level) &&
btree_node_lock_increment(trans, b, level, want))) {
btree_node_lock_increment(trans, &b->c, level, want))) {
mark_btree_node_locked(trans, path, level, want);
return true;
}
@ -260,7 +260,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
goto success;
if (btree_node_lock_seq_matches(path, b, level) &&
btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
btree_node_unlock(trans, path, level);
goto success;
}

View file

@ -193,7 +193,7 @@ void bch2_btree_node_unlock_write(struct btree_trans *,
static inline int btree_node_lock_type(struct btree_trans *trans,
struct btree_path *path,
struct btree *b,
struct btree_bkey_cached_common *b,
struct bpos pos, unsigned level,
enum six_lock_type type,
six_lock_should_sleep_fn should_sleep_fn, void *p)
@ -202,7 +202,7 @@ static inline int btree_node_lock_type(struct btree_trans *trans,
u64 start_time;
int ret;
if (six_trylock_type(&b->c.lock, type))
if (six_trylock_type(&b->lock, type))
return 0;
start_time = local_clock();
@ -212,8 +212,8 @@ static inline int btree_node_lock_type(struct btree_trans *trans,
trans->locking_btree_id = path->btree_id;
trans->locking_level = level;
trans->locking_lock_type = type;
trans->locking = &b->c;
ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p);
trans->locking = b;
ret = six_lock_type(&b->lock, type, should_sleep_fn, p);
trans->locking = NULL;
if (ret)
@ -228,15 +228,16 @@ static inline int btree_node_lock_type(struct btree_trans *trans,
* iterators:
*/
static inline bool btree_node_lock_increment(struct btree_trans *trans,
struct btree *b, unsigned level,
struct btree_bkey_cached_common *b,
unsigned level,
enum btree_node_locked_type want)
{
struct btree_path *path;
trans_for_each_path(trans, path)
if (path->l[level].b == b &&
if (&path->l[level].b->c == b &&
btree_node_locked_type(path, level) >= want) {
six_lock_increment(&b->c.lock, want);
six_lock_increment(&b->lock, want);
return true;
}
@ -244,14 +245,16 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans,
}
int __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
struct btree *, struct bpos, unsigned,
struct btree_bkey_cached_common *,
struct bpos, unsigned,
enum six_lock_type,
six_lock_should_sleep_fn, void *,
unsigned long);
static inline int btree_node_lock(struct btree_trans *trans,
struct btree_path *path,
struct btree *b, struct bpos pos, unsigned level,
struct btree_bkey_cached_common *b,
struct bpos pos, unsigned level,
enum six_lock_type type,
six_lock_should_sleep_fn should_sleep_fn, void *p,
unsigned long ip)
@ -261,12 +264,12 @@ static inline int btree_node_lock(struct btree_trans *trans,
EBUG_ON(level >= BTREE_MAX_DEPTH);
EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
if (likely(six_trylock_type(&b->c.lock, type)) ||
if (likely(six_trylock_type(&b->lock, type)) ||
btree_node_lock_increment(trans, b, level, type) ||
!(ret = __bch2_btree_node_lock(trans, path, b, pos, level, type,
should_sleep_fn, p, ip))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
path->l[b->c.level].lock_taken_time = ktime_get_ns();
path->l[b->level].lock_taken_time = ktime_get_ns();
#endif
}
@ -361,7 +364,9 @@ static inline void btree_path_set_level_up(struct btree_trans *trans,
/* debug */
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
struct btree_path *, struct btree *, unsigned);
struct btree_path *,
struct btree_bkey_cached_common *b,
unsigned);
#ifdef CONFIG_BCACHEFS_DEBUG

View file

@ -822,7 +822,7 @@ static inline int trans_lock_write(struct btree_trans *trans)
goto fail;
ret = btree_node_lock_type(trans, i->path,
insert_l(i)->b,
&insert_l(i)->b->c,
i->path->pos, i->level,
SIX_LOCK_write, NULL, NULL);
BUG_ON(ret);

View file

@ -452,7 +452,7 @@ TRACE_EVENT(btree_node_upgrade_fail,
TRACE_BPOS_assign(pos, path->pos);
__entry->locked = btree_node_locked(path, level);
c = bch2_btree_node_lock_counts(trans, NULL, path->l[level].b, level),
c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
__entry->self_read_count = c.n[SIX_LOCK_read];
__entry->self_intent_count = c.n[SIX_LOCK_intent];
c = six_lock_counts(&path->l[level].b->c.lock);