bcachefs: Btree locking fix, refactoring

Hit an assertion, probably spurious, indicating an iterator was unlocked
when it shouldn't have been (spurious because it wasn't locked at all
when the caller called btree_insert_at()).

Add a flag, BTREE_ITER_NOUNLOCK, and tighten up the assertions.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2018-11-23 05:19:25 -05:00 committed by Kent Overstreet
parent de5bb710f9
commit ad7ae8d63f
7 changed files with 75 additions and 39 deletions

View file

@@ -1113,7 +1113,6 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
/* Free the old nodes and update our sliding window */
for (i = 0; i < nr_old_nodes; i++) {
bch2_btree_node_free_inmem(c, old_nodes[i], iter);
six_unlock_intent(&old_nodes[i]->lock);
/*
* the index update might have triggered a split, in which case

View file

@@ -264,10 +264,13 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
/* Btree iterator locking: */
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_iter_verify_locks(struct btree_iter *iter)
void __bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
unsigned l;
BUG_ON((iter->flags & BTREE_ITER_NOUNLOCK) &&
!btree_node_locked(iter, 0));
for (l = 0; btree_iter_node(iter, l); l++) {
if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
!btree_node_locked(iter, l))
@@ -277,6 +280,15 @@ void bch2_btree_iter_verify_locks(struct btree_iter *iter)
btree_node_locked_type(iter, l));
}
}
void bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
struct btree_iter *linked;
for_each_btree_iter(iter, linked)
__bch2_btree_iter_verify_locks(linked);
}
#endif
__flatten
@@ -382,9 +394,9 @@ void __bch2_btree_iter_downgrade(struct btree_iter *iter,
break;
}
}
bch2_btree_iter_verify_locks(linked);
}
bch2_btree_iter_verify_locks(iter);
}
int bch2_btree_iter_unlock(struct btree_iter *iter)
@@ -776,9 +788,17 @@ void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
struct btree_iter *linked;
unsigned level = b->level;
/* caller now responsible for unlocking @b */
BUG_ON(iter->l[level].b != b);
BUG_ON(!btree_node_intent_locked(iter, level));
iter->l[level].b = BTREE_ITER_NOT_END;
mark_btree_node_unlocked(iter, level);
for_each_btree_iter(iter, linked)
if (linked->l[level].b == b) {
btree_node_unlock(linked, level);
__btree_node_unlock(linked, level);
linked->l[level].b = BTREE_ITER_NOT_END;
}
}

View file

@@ -95,7 +95,7 @@ btree_lock_want(struct btree_iter *iter, int level)
return BTREE_NODE_UNLOCKED;
}
static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
static inline void __btree_node_unlock(struct btree_iter *iter, unsigned level)
{
int lock_type = btree_node_locked_type(iter, level);
@@ -106,6 +106,13 @@ static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
mark_btree_node_unlocked(iter, level);
}
static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
{
BUG_ON(!level && iter->flags & BTREE_ITER_NOUNLOCK);
__btree_node_unlock(iter, level);
}
static inline void __bch2_btree_iter_unlock(struct btree_iter *iter)
{
btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);

View file

@@ -192,6 +192,7 @@ enum btree_iter_type {
*/
#define BTREE_ITER_IS_EXTENTS (1 << 4)
#define BTREE_ITER_ERROR (1 << 5)
#define BTREE_ITER_NOUNLOCK (1 << 6)
enum btree_iter_uptodate {
BTREE_ITER_UPTODATE = 0,

View file

@@ -257,6 +257,11 @@ void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
struct btree_iter *iter)
{
struct btree_iter *linked;
for_each_btree_iter(iter, linked)
BUG_ON(linked->l[b->level].b == b);
/*
* Is this a node that isn't reachable on disk yet?
*
@@ -268,11 +273,10 @@ void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
*/
btree_update_drop_new_node(c, b);
__bch2_btree_node_lock_write(b, iter);
six_lock_write(&b->lock, NULL, NULL);
__btree_node_free(c, b);
six_unlock_write(&b->lock);
bch2_btree_iter_node_drop(iter, b);
six_unlock_intent(&b->lock);
}
static void bch2_btree_node_free_ondisk(struct bch_fs *c,
@@ -1421,25 +1425,19 @@ static void btree_split(struct btree_update *as, struct btree *b,
if (n3)
bch2_open_buckets_put(c, &n3->ob);
/*
* Note - at this point other linked iterators could still have @b read
* locked; we're depending on the bch2_btree_iter_node_replace() calls
* below removing all references to @b so we don't return with other
* iterators pointing to a node they have locked that's been freed.
*
* We have to free the node first because the bch2_iter_node_replace()
* calls will drop _our_ iterator's reference - and intent lock - to @b.
*/
bch2_btree_node_free_inmem(c, b, iter);
/* Successful split, update the iterator to point to the new nodes: */
bch2_btree_iter_node_drop(iter, b);
if (n3)
bch2_btree_iter_node_replace(iter, n3);
if (n2)
bch2_btree_iter_node_replace(iter, n2);
bch2_btree_iter_node_replace(iter, n1);
bch2_btree_node_free_inmem(c, b, iter);
bch2_btree_iter_verify_locks(iter);
bch2_time_stats_update(&c->times[BCH_TIME_btree_split], start_time);
}
@@ -1735,17 +1733,21 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c,
bch2_btree_insert_node(as, parent, iter, &as->parent_keys, flags);
bch2_open_buckets_put(c, &n->ob);
bch2_btree_node_free_inmem(c, b, iter);
bch2_btree_node_free_inmem(c, m, iter);
bch2_btree_iter_node_drop(iter, b);
bch2_btree_iter_node_replace(iter, n);
bch2_btree_iter_verify(iter, n);
bch2_btree_node_free_inmem(c, b, iter);
bch2_btree_node_free_inmem(c, m, iter);
bch2_btree_update_done(as);
six_unlock_intent(&m->lock);
up_read(&c->gc_lock);
out:
bch2_btree_iter_verify_locks(iter);
/*
* Don't downgrade locks here: we're called after successful insert,
* and the caller will downgrade locks after a successful insert
@@ -1828,9 +1830,9 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
bch2_open_buckets_put(c, &n->ob);
bch2_btree_node_free_inmem(c, b, iter);
bch2_btree_iter_node_drop(iter, b);
bch2_btree_iter_node_replace(iter, n);
bch2_btree_node_free_inmem(c, b, iter);
bch2_btree_update_done(as);
return 0;

View file

@@ -187,7 +187,6 @@ bch2_insert_fixup_key(struct btree_insert *trans,
insert->k))
bch2_btree_journal_key(trans, iter, insert->k);
trans->did_work = true;
return BTREE_INSERT_OK;
}
@@ -338,6 +337,7 @@ static inline int do_btree_insert_at(struct btree_insert *trans,
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
struct btree_iter *linked;
unsigned u64s;
int ret;
@@ -410,12 +410,25 @@ static inline int do_btree_insert_at(struct btree_insert *trans,
i->k->k.version = MAX_VERSION;
}
if (trans->flags & BTREE_INSERT_NOUNLOCK) {
/*
* linked iterators that weren't being updated may or may not
* have been traversed/locked, depending on what the caller was
* doing:
*/
for_each_btree_iter(trans->entries[0].iter, linked)
if (linked->uptodate < BTREE_ITER_NEED_RELOCK)
linked->flags |= BTREE_ITER_NOUNLOCK;
}
trans->did_work = true;
trans_for_each_entry(trans, i) {
switch (btree_insert_key_leaf(trans, i)) {
case BTREE_INSERT_OK:
break;
case BTREE_INSERT_NEED_TRAVERSE:
BUG_ON((trans->flags & BTREE_INSERT_ATOMIC));
BUG_ON((trans->flags &
(BTREE_INSERT_ATOMIC|BTREE_INSERT_NOUNLOCK)));
ret = -EINTR;
goto out;
default:
@@ -461,8 +474,7 @@ int __bch2_btree_insert_at(struct btree_insert *trans)
BUG_ON(!trans->nr);
for_each_btree_iter(trans->entries[0].iter, linked)
bch2_btree_iter_verify_locks(linked);
bch2_btree_iter_verify_locks(trans->entries[0].iter);
/* for the sake of sanity: */
BUG_ON(trans->nr > 1 && !(trans->flags & BTREE_INSERT_ATOMIC));
@@ -504,15 +516,11 @@ int __bch2_btree_insert_at(struct btree_insert *trans)
out:
percpu_ref_put(&c->writes);
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
/* make sure we didn't drop or screw up locks: */
for_each_btree_iter(trans->entries[0].iter, linked) {
bch2_btree_iter_verify_locks(linked);
BUG_ON((trans->flags & BTREE_INSERT_NOUNLOCK) &&
trans->did_work &&
!btree_node_locked(linked, 0));
}
}
bch2_btree_iter_verify_locks(trans->entries[0].iter);
for_each_btree_iter(trans->entries[0].iter, linked)
linked->flags &= ~BTREE_ITER_NOUNLOCK;
BUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);

View file

@@ -1214,7 +1214,6 @@ static void extent_insert_committed(struct extent_insert_state *s)
bch2_cut_front(s->committed, insert);
insert->k.needs_whiteout = false;
s->trans->did_work = true;
}
void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)