bcachefs: btree_iter -> btree_path_idx_t

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2023-12-04 00:39:38 -05:00
parent 788cc25d15
commit 07f383c71f
11 changed files with 195 additions and 155 deletions
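
In short: struct btree_iter previously cached struct btree_path pointers into trans->paths[], and callers dereferenced them directly; this commit converts those fields to btree_path_idx_t indices that are resolved through trans->paths at the point of use. Presumably this is preparation for allowing trans->paths to be reallocated or resized, which would invalidate cached pointers but not indices. A minimal sketch of the shape of the change, condensed from the hunks below (surrounding fields elided):

/* Before: iterators held raw pointers into trans->paths[] */
struct btree_iter {
	struct btree_trans	*trans;
	struct btree_path	*path;
	struct btree_path	*update_path;
	struct btree_path	*key_cache_path;
	/* ... */
};

/* After: iterators hold indices; pointers are materialized on demand */
struct btree_iter {
	struct btree_trans	*trans;
	btree_path_idx_t	path;		/* typedef u16 btree_path_idx_t */
	btree_path_idx_t	update_path;
	btree_path_idx_t	key_cache_path;
	/* ... */
};

static inline struct btree_path *btree_iter_path(struct btree_trans *trans,
						 struct btree_iter *iter)
{
	return trans->paths + iter->path;
}

Call sites that previously NULL-checked or cleared these pointers now test against and store 0 (e.g. "if (iter.path)" and "iter->path = 0" below), which reads as path index 0 being reserved as a "no path" sentinel; that convention is inferred from the call sites here, not stated by the commit itself.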

View file

@@ -862,8 +862,9 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos
bch2_trans_copy_iter(&iter2, iter);
if (!bpos_eq(iter->path->l[0].b->key.k.p, SPOS_MAX))
end = bkey_min(end, bpos_nosnap_successor(iter->path->l[0].b->key.k.p));
struct btree_path *path = btree_iter_path(iter->trans, iter);
if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));
end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));

View file

@@ -367,7 +367,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
if (!ob)
set_btree_iter_dontneed(&iter);
err:
if (iter.trans && iter.path)
if (iter.path)
set_btree_iter_dontneed(&iter);
bch2_trans_iter_exit(trans, &iter);
printbuf_exit(&buf);

View file

@@ -22,8 +22,8 @@
#include <linux/prefetch.h>
static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *,
btree_path_idx_t, btree_path_idx_t);
static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
@@ -252,7 +252,7 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
BUG_ON(iter->btree_id >= BTREE_ID_NR);
BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != btree_iter_path(trans, iter)->cached);
BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
@@ -262,8 +262,8 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
!btree_type_has_snapshot_field(iter->btree_id));
if (iter->update_path)
bch2_btree_path_verify(trans, iter->update_path);
bch2_btree_path_verify(trans, iter->path);
bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
}
static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
@@ -1542,7 +1542,7 @@ static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
path->intent_ref = 0;
path->nodes_locked = 0;
btree_path_list_add(trans, pos ? trans->paths + pos : NULL, path);
btree_path_list_add(trans, pos, idx);
trans->paths_sorted = false;
return idx;
}
@@ -1667,7 +1667,7 @@ struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *
int __must_check
__bch2_btree_iter_traverse(struct btree_iter *iter)
{
return bch2_btree_path_traverse(iter->trans, iter->path->idx, iter->flags);
return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
}
int __must_check
@@ -1676,16 +1676,16 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
struct btree_trans *trans = iter->trans;
int ret;
iter->path = trans->paths + bch2_btree_path_set_pos(trans, iter->path->idx,
iter->path = bch2_btree_path_set_pos(trans, iter->path,
btree_iter_search_key(iter),
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path->idx, iter->flags);
ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
if (ret)
return ret;
btree_path_set_should_be_locked(iter->path);
btree_path_set_should_be_locked(trans->paths + iter->path);
return 0;
}
@@ -1697,14 +1697,15 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
struct btree *b = NULL;
int ret;
EBUG_ON(iter->path->cached);
EBUG_ON(trans->paths[iter->path].cached);
bch2_btree_iter_verify(iter);
ret = bch2_btree_path_traverse(trans, iter->path->idx, iter->flags);
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
goto err;
b = btree_path_node(iter->path, iter->path->level);
struct btree_path *path = btree_iter_path(trans, iter);
b = btree_path_node(path, path->level);
if (!b)
goto out;
@@ -1713,10 +1714,10 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
bkey_init(&iter->k);
iter->k.p = iter->pos = b->key.k.p;
iter->path = trans->paths + bch2_btree_path_set_pos(trans, iter->path->idx, b->key.k.p,
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(iter->path);
btree_path_set_should_be_locked(btree_iter_path(trans, iter));
out:
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
@@ -1741,14 +1742,15 @@ struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
struct btree_trans *trans = iter->trans;
struct btree_path *path = iter->path;
struct btree *b = NULL;
int ret;
EBUG_ON(trans->paths[iter->path].cached);
bch2_trans_verify_not_in_restart(trans);
EBUG_ON(iter->path->cached);
bch2_btree_iter_verify(iter);
struct btree_path *path = btree_iter_path(trans, iter);
/* already at end? */
if (!btree_path_node(path, path->level))
return NULL;
@@ -1778,28 +1780,30 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
* Haven't gotten to the end of the parent node: go back down to
* the next child node
*/
path = iter->path = trans->paths +
bch2_btree_path_set_pos(trans, path->idx, bpos_successor(iter->pos),
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
iter->path = bch2_btree_path_set_pos(trans, iter->path,
bpos_successor(iter->pos),
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
path = btree_iter_path(trans, iter);
btree_path_set_level_down(trans, path, iter->min_depth);
ret = bch2_btree_path_traverse(trans, path->idx, iter->flags);
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
goto err;
path = btree_iter_path(trans, iter);
b = path->l[path->level].b;
}
bkey_init(&iter->k);
iter->k.p = iter->pos = b->key.k.p;
iter->path = trans->paths + bch2_btree_path_set_pos(trans, iter->path->idx, b->key.k.p,
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(iter->path);
BUG_ON(iter->path->uptodate);
btree_path_set_should_be_locked(btree_iter_path(trans, iter));
EBUG_ON(btree_iter_path(trans, iter)->uptodate);
out:
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
@@ -1841,14 +1845,15 @@ inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
static noinline
struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter)
{
struct btree_trans *trans = iter->trans;
struct bkey_i *ret = NULL;
trans_for_each_update(iter->trans, i) {
trans_for_each_update(trans, i) {
if (i->btree_id < iter->btree_id)
continue;
if (i->btree_id > iter->btree_id)
break;
if (bpos_lt(i->k->k.p, iter->path->pos))
if (bpos_lt(i->k->k.p, btree_iter_path(trans, iter)->pos))
continue;
if (i->key_cache_already_flushed)
continue;
@@ -1870,9 +1875,11 @@ static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
struct btree_iter *iter,
struct bpos end_pos)
{
struct btree_path *path = btree_iter_path(trans, iter);
return bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
iter->path->level,
iter->path->pos,
path->level,
path->pos,
end_pos,
&iter->journal_idx);
}
@@ -1881,7 +1888,8 @@ static noinline
struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
struct btree_iter *iter)
{
struct bkey_i *k = bch2_btree_journal_peek(trans, iter, iter->path->pos);
struct btree_path *path = btree_iter_path(trans, iter);
struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);
if (k) {
iter->k = k->k;
@@ -1896,9 +1904,10 @@ struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k)
{
struct btree_path *path = btree_iter_path(trans, iter);
struct bkey_i *next_journal =
bch2_btree_journal_peek(trans, iter,
k.k ? k.k->p : path_l(iter->path)->b->key.k.p);
k.k ? k.k->p : path_l(path)->b->key.k.p);
if (next_journal) {
iter->k = next_journal->k;
@@ -1929,25 +1938,25 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
return bkey_s_c_null;
if (!iter->key_cache_path)
iter->key_cache_path = trans->paths + bch2_path_get(trans, iter->btree_id, pos,
iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
iter->flags & BTREE_ITER_INTENT, 0,
iter->flags|BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL,
_THIS_IP_);
iter->key_cache_path = trans->paths + bch2_btree_path_set_pos(trans, iter->key_cache_path->idx, pos,
iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->key_cache_path->idx,
ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
iter->flags|BTREE_ITER_CACHED) ?:
bch2_btree_path_relock(trans, iter->path, _THIS_IP_);
bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
if (unlikely(ret))
return bkey_s_c_err(ret);
btree_path_set_should_be_locked(iter->key_cache_path);
btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
k = bch2_btree_path_peek_slot(iter->key_cache_path, &u);
k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
if (k.k && !bkey_err(k)) {
iter->k = u;
k.k = &iter->k;
@@ -1962,17 +1971,17 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
struct bkey_s_c k, k2;
int ret;
EBUG_ON(iter->path->cached);
EBUG_ON(btree_iter_path(trans, iter)->cached);
bch2_btree_iter_verify(iter);
while (1) {
struct btree_path_level *l;
iter->path = trans->paths + bch2_btree_path_set_pos(trans, iter->path->idx, search_key,
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path->idx, iter->flags);
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
/* ensure that iter->k is consistent with iter->pos: */
bch2_btree_iter_set_pos(iter, iter->pos);
@@ -1980,7 +1989,8 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
goto out;
}
l = path_l(iter->path);
struct btree_path *path = btree_iter_path(trans, iter);
l = path_l(path);
if (unlikely(!l->b)) {
/* No btree nodes at requested level: */
@@ -1989,7 +1999,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
goto out;
}
btree_path_set_should_be_locked(iter->path);
btree_path_set_should_be_locked(path);
k = btree_path_level_peek_all(trans->c, l, &iter->k);
@@ -2067,9 +2077,9 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
EBUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && bkey_eq(end, POS_MAX));
if (iter->update_path) {
bch2_path_put_nokeep(trans, iter->update_path->idx,
bch2_path_put_nokeep(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
iter->update_path = NULL;
iter->update_path = 0;
}
bch2_btree_iter_verify_entry_exit(iter);
@@ -2095,10 +2105,10 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
goto end;
if (iter->update_path &&
!bkey_eq(iter->update_path->pos, k.k->p)) {
bch2_path_put_nokeep(trans, iter->update_path->idx,
!bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
bch2_path_put_nokeep(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
iter->update_path = NULL;
iter->update_path = 0;
}
if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
@@ -2118,14 +2128,14 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
* advance, same as on exit for iter->path, but only up
* to snapshot
*/
__btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
__btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_INTENT);
iter->update_path = iter->path;
iter->update_path = trans->paths + bch2_btree_path_set_pos(trans,
iter->update_path->idx, pos,
iter->update_path = bch2_btree_path_set_pos(trans,
iter->update_path, pos,
iter->flags & BTREE_ITER_INTENT,
_THIS_IP_);
ret = bch2_btree_path_traverse(trans, iter->update_path->idx, iter->flags);
ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
if (unlikely(ret)) {
k = bkey_s_c_err(ret);
goto out_no_locked;
@@ -2170,18 +2180,18 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
iter->pos = iter_pos;
iter->path = trans->paths + bch2_btree_path_set_pos(trans, iter->path->idx, k.k->p,
iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(iter->path);
btree_path_set_should_be_locked(btree_iter_path(trans, iter));
out_no_locked:
if (iter->update_path) {
ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_);
ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
if (unlikely(ret))
k = bkey_s_c_err(ret);
else
btree_path_set_should_be_locked(iter->update_path);
btree_path_set_should_be_locked(trans->paths + iter->update_path);
}
if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
@@ -2234,7 +2244,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
const struct bch_val *saved_v;
int ret;
EBUG_ON(iter->path->cached || iter->path->level);
EBUG_ON(btree_iter_path(trans, iter)->cached ||
btree_iter_path(trans, iter)->level);
EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
if (iter->flags & BTREE_ITER_WITH_JOURNAL)
@@ -2247,11 +2258,11 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
search_key.snapshot = U32_MAX;
while (1) {
iter->path = trans->paths + bch2_btree_path_set_pos(trans, iter->path->idx, search_key,
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path->idx, iter->flags);
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
/* ensure that iter->k is consistent with iter->pos: */
bch2_btree_iter_set_pos(iter, iter->pos);
@@ -2259,14 +2270,14 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
goto out_no_locked;
}
k = btree_path_level_peek(trans, iter->path,
&iter->path->l[0], &iter->k);
struct btree_path *path = btree_iter_path(trans, iter);
k = btree_path_level_peek(trans, path, &path->l[0], &iter->k);
if (!k.k ||
((iter->flags & BTREE_ITER_IS_EXTENTS)
? bpos_ge(bkey_start_pos(k.k), search_key)
: bpos_gt(k.k->p, search_key)))
k = btree_path_level_prev(trans, iter->path,
&iter->path->l[0], &iter->k);
k = btree_path_level_prev(trans, path, &path->l[0], &iter->k);
if (likely(k.k)) {
if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
@@ -2279,9 +2290,9 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
* that candidate
*/
if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
bch2_path_put_nokeep(trans, iter->path->idx,
bch2_path_put_nokeep(trans, iter->path,
iter->flags & BTREE_ITER_INTENT);
iter->path = saved_path;
iter->path = saved_path->idx;
saved_path = NULL;
iter->k = saved_k;
k.v = saved_v;
@@ -2294,8 +2305,9 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
if (saved_path)
bch2_path_put_nokeep(trans, saved_path->idx,
iter->flags & BTREE_ITER_INTENT);
saved_path = trans->paths + btree_path_clone(trans, iter->path->idx,
saved_path = trans->paths + btree_path_clone(trans, iter->path,
iter->flags & BTREE_ITER_INTENT);
path = btree_iter_path(trans, iter);
saved_k = *k.k;
saved_v = k.v;
}
@@ -2312,10 +2324,11 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
continue;
}
btree_path_set_should_be_locked(path);
break;
} else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) {
} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
/* Advance to previous leaf node: */
search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
search_key = bpos_predecessor(path->l[0].b->data->min_key);
} else {
/* Start of btree: */
bch2_btree_iter_set_pos(iter, POS_MIN);
@@ -2332,8 +2345,6 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
iter->pos.snapshot = iter->snapshot;
btree_path_set_should_be_locked(iter->path);
out_no_locked:
if (saved_path)
bch2_path_put_nokeep(trans, saved_path->idx, iter->flags & BTREE_ITER_INTENT);
@@ -2368,7 +2379,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
bch2_btree_iter_verify(iter);
bch2_btree_iter_verify_entry_exit(iter);
EBUG_ON(iter->path->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
/* extents can't span inode numbers: */
if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
@@ -2380,11 +2391,11 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
}
search_key = btree_iter_search_key(iter);
iter->path = trans->paths + bch2_btree_path_set_pos(trans, iter->path->idx, search_key,
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
iter->flags & BTREE_ITER_INTENT,
btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path->idx, iter->flags);
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
k = bkey_s_c_err(ret);
goto out_no_locked;
@@ -2413,7 +2424,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
goto out_no_locked;
}
k = bch2_btree_path_peek_slot(iter->path, &iter->k);
k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
if (unlikely(!k.k))
goto out_no_locked;
} else {
@@ -2423,7 +2434,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
if (iter->flags & BTREE_ITER_IS_EXTENTS)
end.offset = U64_MAX;
EBUG_ON(iter->path->level);
EBUG_ON(btree_iter_path(trans, iter)->level);
if (iter->flags & BTREE_ITER_INTENT) {
struct btree_iter iter2;
@@ -2469,7 +2480,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
}
}
out:
btree_path_set_should_be_locked(iter->path);
btree_path_set_should_be_locked(btree_iter_path(trans, iter));
out_no_locked:
bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
@@ -2614,21 +2625,22 @@ static inline void btree_path_list_remove(struct btree_trans *trans,
}
static inline void btree_path_list_add(struct btree_trans *trans,
struct btree_path *pos,
struct btree_path *path)
btree_path_idx_t pos,
btree_path_idx_t path_idx)
{
struct btree_path *path = trans->paths + path_idx;
unsigned i;
path->sorted_idx = pos ? pos->sorted_idx + 1 : trans->nr_sorted;
path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
trans->sorted + path->sorted_idx,
DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
trans->nr_sorted++;
trans->sorted[path->sorted_idx] = path->idx;
trans->sorted[path->sorted_idx] = path_idx;
#else
array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
#endif
for (i = path->sorted_idx; i < trans->nr_sorted; i++)
@@ -2640,17 +2652,18 @@ static inline void btree_path_list_add(struct btree_trans *trans,
void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
{
if (iter->update_path)
bch2_path_put_nokeep(trans, iter->update_path->idx,
bch2_path_put_nokeep(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
if (iter->path)
bch2_path_put(trans, iter->path->idx,
bch2_path_put(trans, iter->path,
iter->flags & BTREE_ITER_INTENT);
if (iter->key_cache_path)
bch2_path_put(trans, iter->key_cache_path->idx,
bch2_path_put(trans, iter->key_cache_path,
iter->flags & BTREE_ITER_INTENT);
iter->path = NULL;
iter->update_path = NULL;
iter->key_cache_path = NULL;
iter->path = 0;
iter->update_path = 0;
iter->key_cache_path = 0;
iter->trans = NULL;
}
void bch2_trans_iter_init_outlined(struct btree_trans *trans,
@@ -2681,19 +2694,22 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
iter->min_depth = depth;
BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
BUG_ON(iter->path->level != depth);
BUG_ON(iter->min_depth != depth);
struct btree_path *path = btree_iter_path(trans, iter);
BUG_ON(path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
BUG_ON(path->level != depth);
BUG_ON(iter->min_depth != depth);
}
void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
{
struct btree_trans *trans = src->trans;
*dst = *src;
if (src->path)
__btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
__btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_INTENT);
if (src->update_path)
__btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
dst->key_cache_path = NULL;
__btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_INTENT);
dst->key_cache_path = 0;
}
void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)

View file

@@ -203,7 +203,7 @@ static inline int __must_check bch2_btree_path_traverse(struct btree_trans *tran
}
btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
unsigned, unsigned, unsigned, unsigned long);
unsigned, unsigned, unsigned, unsigned long);
struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
/*
@@ -359,10 +359,12 @@ static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpo
static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
struct btree_trans *trans = iter->trans;
if (unlikely(iter->update_path))
bch2_path_put(iter->trans, iter->update_path->idx,
bch2_path_put(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
iter->update_path = NULL;
iter->update_path = 0;
if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
new_pos.snapshot = iter->snapshot;
@@ -431,8 +433,8 @@ static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
unsigned long ip)
{
iter->trans = trans;
iter->update_path = NULL;
iter->key_cache_path = NULL;
iter->update_path = 0;
iter->key_cache_path = 0;
iter->btree_id = btree_id;
iter->min_depth = 0;
iter->flags = flags;
@@ -443,7 +445,7 @@ static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
#ifdef CONFIG_BCACHEFS_DEBUG
iter->ip_allocated = ip;
#endif
iter->path = trans->paths + bch2_path_get(trans, btree_id, iter->pos,
iter->path = bch2_path_get(trans, btree_id, iter->pos,
locks_want, depth, flags, ip);
}
@@ -471,8 +473,10 @@ void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);
static inline void set_btree_iter_dontneed(struct btree_iter *iter)
{
if (!iter->trans->restarted)
iter->path->preserve = false;
struct btree_trans *trans = iter->trans;
if (!trans->restarted)
btree_iter_path(trans, iter)->preserve = false;
}
void *__bch2_trans_kmalloc(struct btree_trans *, size_t);

View file

@@ -630,7 +630,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
if (ret)
goto out;
ck = (void *) c_iter.path->l[0].b;
ck = (void *) btree_iter_path(trans, &c_iter)->l[0].b;
if (!ck)
goto out;
@@ -680,7 +680,8 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
bch2_journal_pin_drop(j, &ck->journal);
BUG_ON(!btree_node_locked(c_iter.path, 0));
struct btree_path *path = btree_iter_path(trans, &c_iter);
BUG_ON(!btree_node_locked(path, 0));
if (!evict) {
if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
@@ -691,17 +692,17 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
struct btree_path *path2;
evict:
trans_for_each_path(trans, path2)
if (path2 != c_iter.path)
if (path2 != path)
__bch2_btree_path_unlock(trans, path2);
bch2_btree_node_lock_write_nofail(trans, c_iter.path, &ck->c);
bch2_btree_node_lock_write_nofail(trans, path, &ck->c);
if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
atomic_long_dec(&c->btree_key_cache.nr_dirty);
}
mark_btree_node_locked_noreset(c_iter.path, 0, BTREE_NODE_UNLOCKED);
mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);
bkey_cached_evict(&c->btree_key_cache, ck);
bkey_cached_free_fast(&c->btree_key_cache, ck);
}

View file

@@ -225,8 +225,8 @@ enum btree_path_uptodate {
typedef u16 btree_path_idx_t;
struct btree_path {
u8 idx;
u8 sorted_idx;
btree_path_idx_t idx;
btree_path_idx_t sorted_idx;
u8 ref;
u8 intent_ref;
@@ -282,9 +282,9 @@ static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
*/
struct btree_iter {
struct btree_trans *trans;
struct btree_path *path;
struct btree_path *update_path;
struct btree_path *key_cache_path;
btree_path_idx_t path;
btree_path_idx_t update_path;
btree_path_idx_t key_cache_path;
enum btree_id btree_id:8;
u8 min_depth;
@@ -410,7 +410,7 @@ struct btree_trans {
* extent:
*/
unsigned extra_journal_res;
u8 nr_max_paths;
btree_path_idx_t nr_max_paths;
u16 journal_entries_u64s;
u16 journal_entries_size;
@@ -437,6 +437,18 @@ struct btree_trans {
struct replicas_delta_list *fs_usage_deltas;
};
static inline struct btree_path *btree_iter_path(struct btree_trans *trans, struct btree_iter *iter)
{
return trans->paths + iter->path;
}
static inline struct btree_path *btree_iter_key_cache_path(struct btree_trans *trans, struct btree_iter *iter)
{
return iter->key_cache_path
? trans->paths + iter->key_cache_path
: NULL;
}
#define BCH_BTREE_WRITE_TYPES() \
x(initial, 0) \
x(init_next_bset, 1) \
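
As a usage note: with the btree_iter_path() helper above, the conversion at call sites is mechanical. A hypothetical before/after pair, following the pattern applied throughout this commit (the function name is illustrative, not from the source):

/* before: dereference the cached pointer directly */
static bool iter_pos_matches(struct btree_iter *iter)
{
	return bpos_eq(iter->path->pos, iter->pos);
}

/* after: resolve the index through trans->paths at the point of use */
static bool iter_pos_matches(struct btree_trans *trans, struct btree_iter *iter)
{
	return bpos_eq(btree_iter_path(trans, iter)->pos, iter->pos);
}

The same index-as-sentinel convention keeps expressions like "iter->update_path ?: iter->path" (see bch2_trans_update below) working unchanged: 0 is falsy for an index just as NULL was for a pointer.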

View file

@@ -266,7 +266,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
bch2_cut_front(new.k->p, update);
ret = bch2_trans_update_by_path(trans, iter->path, update,
ret = bch2_trans_update_by_path(trans, btree_iter_path(trans, iter), update,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
flags, _RET_IP_);
if (ret)
@@ -462,37 +462,37 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
struct btree_iter *iter,
struct btree_path *path)
{
if (!iter->key_cache_path ||
!iter->key_cache_path->should_be_locked ||
!bpos_eq(iter->key_cache_path->pos, iter->pos)) {
struct btree_path *key_cache_path = btree_iter_key_cache_path(trans, iter);
if (!key_cache_path ||
!key_cache_path->should_be_locked ||
!bpos_eq(key_cache_path->pos, iter->pos)) {
struct bkey_cached *ck;
int ret;
if (!iter->key_cache_path)
iter->key_cache_path = trans->paths +
iter->key_cache_path =
bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
BTREE_ITER_INTENT|
BTREE_ITER_CACHED, _THIS_IP_);
iter->key_cache_path = trans->paths +
bch2_btree_path_set_pos(trans, iter->key_cache_path->idx, path->pos,
iter->key_cache_path =
bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
iter->flags & BTREE_ITER_INTENT,
_THIS_IP_);
ret = bch2_btree_path_traverse(trans, iter->key_cache_path->idx,
BTREE_ITER_CACHED);
ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_CACHED);
if (unlikely(ret))
return ret;
ck = (void *) iter->key_cache_path->l[0].b;
ck = (void *) trans->paths[iter->key_cache_path].l[0].b;
if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
}
btree_path_set_should_be_locked(iter->key_cache_path);
btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
}
return 0;
@@ -501,7 +501,7 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_i *k, enum btree_update_flags flags)
{
struct btree_path *path = iter->update_path ?: iter->path;
struct btree_path *path = trans->paths + (iter->update_path ?: iter->path);
int ret;
if (iter->flags & BTREE_ITER_IS_EXTENTS)
@@ -529,7 +529,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
if (ret)
return ret;
path = iter->key_cache_path;
path = trans->paths + iter->key_cache_path;
}
return bch2_trans_update_by_path(trans, path, k, flags, _RET_IP_);

View file

@@ -1951,9 +1951,9 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
flags |= BCH_TRANS_COMMIT_no_enospc;
parent = btree_node_parent(iter->path, b);
as = bch2_btree_update_start(trans, iter->path, b->c.level,
false, flags);
struct btree_path *path = btree_iter_path(trans, iter);
parent = btree_node_parent(path, b);
as = bch2_btree_update_start(trans, path, b->c.level, false, flags);
ret = PTR_ERR_OR_ZERO(as);
if (ret)
goto out;
@@ -1975,20 +1975,20 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
if (parent) {
bch2_keylist_add(&as->parent_keys, &n->key);
ret = bch2_btree_insert_node(as, trans, iter->path, parent,
&as->parent_keys, flags);
ret = bch2_btree_insert_node(as, trans, btree_iter_path(trans, iter),
parent, &as->parent_keys, flags);
if (ret)
goto err;
} else {
bch2_btree_set_root(as, trans, iter->path, n);
bch2_btree_set_root(as, trans, btree_iter_path(trans, iter), n);
}
bch2_btree_update_get_open_buckets(as, n);
bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
bch2_btree_node_free_inmem(trans, iter->path, b);
bch2_btree_node_free_inmem(trans, btree_iter_path(trans, iter), b);
bch2_trans_node_add(trans, iter->path, n);
bch2_trans_node_add(trans, trans->paths + iter->path, n);
six_unlock_intent(&n->c.lock);
bch2_btree_update_done(as, trans);
@@ -2165,18 +2165,19 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
BUG_ON(ret);
}
parent = btree_node_parent(iter->path, b);
parent = btree_node_parent(btree_iter_path(trans, iter), b);
if (parent) {
bch2_trans_copy_iter(&iter2, iter);
iter2.path = trans->paths + bch2_btree_path_make_mut(trans, iter2.path->idx,
iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
iter2.flags & BTREE_ITER_INTENT,
_THIS_IP_);
BUG_ON(iter2.path->level != b->c.level);
BUG_ON(!bpos_eq(iter2.path->pos, new_key->k.p));
struct btree_path *path2 = btree_iter_path(trans, &iter2);
BUG_ON(path2->level != b->c.level);
BUG_ON(!bpos_eq(path2->pos, new_key->k.p));
btree_path_set_level_up(trans, iter2.path);
btree_path_set_level_up(trans, path2);
trans->paths_sorted = false;
@@ -2203,7 +2204,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
if (ret)
goto err;
bch2_btree_node_lock_write_nofail(trans, iter->path, &b->c);
bch2_btree_node_lock_write_nofail(trans, btree_iter_path(trans, iter), &b->c);
if (new_hash) {
mutex_lock(&c->btree_cache.lock);
@@ -2218,7 +2219,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
bkey_copy(&b->key, new_key);
}
bch2_btree_node_unlock_write(trans, iter->path, b);
bch2_btree_node_unlock_write(trans, btree_iter_path(trans, iter), b);
out:
bch2_trans_iter_exit(trans, &iter2);
return ret;
@@ -2237,7 +2238,7 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
{
struct bch_fs *c = trans->c;
struct btree *new_hash = NULL;
struct btree_path *path = iter->path;
struct btree_path *path = btree_iter_path(trans, iter);
struct closure cl;
int ret = 0;
@@ -2295,7 +2296,7 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
goto out;
/* has node been freed? */
if (iter.path->l[b->c.level].b != b) {
if (btree_iter_path(trans, &iter)->l[b->c.level].b != b) {
/* node has been freed: */
BUG_ON(!btree_node_dying(b));
goto out;

View file

@@ -106,7 +106,9 @@ static noinline int wb_flush_one_slowpath(struct btree_trans *trans,
struct btree_iter *iter,
struct btree_write_buffered_key *wb)
{
bch2_btree_node_unlock_write(trans, iter->path, iter->path->l[0].b);
struct btree_path *path = btree_iter_path(trans, iter);
bch2_btree_node_unlock_write(trans, path, path->l[0].b);
trans->journal_res.seq = wb->journal_seq;
@@ -139,10 +141,10 @@ static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *ite
* We can't clone a path that has write locks: unshare it now, before
* set_pos and traverse():
*/
if (iter->path->ref > 1)
iter->path = trans->paths + __bch2_btree_path_make_mut(trans, iter->path->idx, true, _THIS_IP_);
if (btree_iter_path(trans, iter)->ref > 1)
iter->path = __bch2_btree_path_make_mut(trans, iter->path, true, _THIS_IP_);
path = iter->path;
path = btree_iter_path(trans, iter);
if (!*write_locked) {
ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
@@ -300,7 +302,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
}
if (write_locked) {
struct btree_path *path = iter.path;
struct btree_path *path = btree_iter_path(trans, &iter);
if (path->btree_id != i->btree ||
bpos_gt(k->k.k.p, path->l[0].b->key.k.p)) {
@@ -316,7 +318,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
}
bch2_btree_iter_set_pos(&iter, k->k.k.p);
iter.path->preserve = false;
btree_iter_path(trans, &iter)->preserve = false;
do {
if (race_fault()) {
@@ -338,8 +340,10 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
break;
}
if (write_locked)
bch2_btree_node_unlock_write(trans, iter.path, iter.path->l[0].b);
if (write_locked) {
struct btree_path *path = btree_iter_path(trans, &iter);
bch2_btree_node_unlock_write(trans, path, path->l[0].b);
}
bch2_trans_iter_exit(trans, &iter);
if (ret)

View file

@@ -460,7 +460,8 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
for_each_btree_key(trans, iter, i->id, i->from,
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS, k, ({
struct btree_path_level *l = &iter.path->l[0];
struct btree_path_level *l =
&btree_iter_path(trans, &iter)->l[0];
struct bkey_packed *_k =
bch2_btree_node_iter_peek(&l->iter, l->b);

View file

@@ -1668,7 +1668,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
goto err;
}
BUG_ON(!iter->path->should_be_locked);
BUG_ON(!btree_iter_path(trans, iter)->should_be_locked);
i = walk_inode(trans, dir, equiv, k.k->type == KEY_TYPE_whiteout);
ret = PTR_ERR_OR_ZERO(i);