bcachefs: Refactor bch2_extent_trim_atomic() for reflink

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2019-08-16 09:58:07 -04:00 committed by Kent Overstreet
parent 63069bb6bf
commit 3c7f3b7aeb
7 changed files with 105 additions and 31 deletions

View file

@@ -400,8 +400,7 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
BUG_ON(i->iter->level);
BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
!bch2_extent_is_atomic(i->k, i->iter));
bkey_cmp(i->k->k.p, i->iter->l[0].b->key.k.p) > 0);
EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
!(trans->flags & BTREE_INSERT_ATOMIC));
}
@@ -1031,7 +1030,10 @@ int bch2_btree_delete_at_range(struct btree_trans *trans,
/* create the biggest key we can */
bch2_key_resize(&delete.k, max_sectors);
bch2_cut_back(end, &delete.k);
bch2_extent_trim_atomic(&delete, iter);
ret = bch2_extent_trim_atomic(&delete, iter);
if (ret)
break;
}
bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &delete));

View file

@@ -1255,9 +1255,6 @@ int bch2_mark_update(struct btree_trans *trans,
if (!btree_node_type_needs_gc(iter->btree_id))
return 0;
EBUG_ON(btree_node_is_extents(b) &&
!bch2_extent_is_atomic(insert->k, insert->iter));
if (!(trans->flags & BTREE_INSERT_NOMARK_INSERT))
bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k),
0, insert->k->k.size,
@@ -1636,9 +1633,6 @@ int bch2_trans_mark_update(struct btree_trans *trans,
if (!btree_node_type_needs_gc(iter->btree_id))
return 0;
EBUG_ON(btree_node_is_extents(b) &&
!bch2_extent_is_atomic(insert, iter));
ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(insert),
0, insert->k.size, BCH_BUCKET_MARK_INSERT);
if (ret)

View file

@@ -949,47 +949,104 @@ static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k)
return ret;
}
static inline struct bpos
bch2_extent_atomic_end(struct bkey_i *insert, struct btree_iter *iter)
static int __bch2_extent_atomic_end(struct btree_trans *trans,
struct bkey_s_c k,
unsigned offset,
struct bpos *end,
unsigned *nr_iters,
unsigned max_iters)
{
int ret = 0;
switch (k.k->type) {
case KEY_TYPE_extent:
*nr_iters += bch2_bkey_nr_alloc_ptrs(k);
if (*nr_iters >= max_iters) {
*end = bpos_min(*end, k.k->p);
return 0;
}
break;
}
return ret;
}
int bch2_extent_atomic_end(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_i *insert,
struct bpos *end)
{
struct btree *b = iter->l[0].b;
struct btree_node_iter node_iter = iter->l[0].iter;
struct bkey_packed *_k;
unsigned nr_alloc_ptrs =
unsigned nr_iters =
bch2_bkey_nr_alloc_ptrs(bkey_i_to_s_c(insert));
int ret = 0;
BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
BUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0);
while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
*end = bpos_min(insert->k.p, b->key.k.p);
ret = __bch2_extent_atomic_end(trans, bkey_i_to_s_c(insert),
0, end, &nr_iters, 10);
if (ret)
return ret;
while (nr_iters < 20 &&
(_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
KEY_TYPE_discard))) {
struct bkey unpacked;
struct bkey_s_c k = bkey_disassemble(b, _k, &unpacked);
unsigned offset = 0;
if (bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0)
if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
break;
nr_alloc_ptrs += bch2_bkey_nr_alloc_ptrs(k);
if (bkey_cmp(bkey_start_pos(&insert->k),
bkey_start_pos(k.k)) > 0)
offset = bkey_start_offset(&insert->k) -
bkey_start_offset(k.k);
if (nr_alloc_ptrs > 20) {
BUG_ON(bkey_cmp(k.k->p, bkey_start_pos(&insert->k)) <= 0);
return bpos_min(insert->k.p, k.k->p);
}
ret = __bch2_extent_atomic_end(trans, k, offset,
end, &nr_iters, 20);
if (ret)
return ret;
if (nr_iters >= 20)
break;
bch2_btree_node_iter_advance(&node_iter, b);
}
return bpos_min(insert->k.p, b->key.k.p);
return 0;
}
void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
int bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
{
bch2_cut_back(bch2_extent_atomic_end(k, iter), &k->k);
struct bpos end;
int ret;
ret = bch2_extent_atomic_end(iter->trans, iter, k, &end);
if (ret)
return ret;
bch2_cut_back(end, &k->k);
return 0;
}
bool bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
int bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
{
return !bkey_cmp(bch2_extent_atomic_end(k, iter), k->k.p);
struct bpos end;
int ret;
ret = bch2_extent_atomic_end(iter->trans, iter, k, &end);
if (ret)
return ret;
return !bkey_cmp(end, k->k.p);
}
enum btree_insert_ret

View file

@@ -425,8 +425,10 @@ enum merge_result bch2_reservation_merge(struct bch_fs *,
.key_merge = bch2_reservation_merge, \
}
void bch2_extent_trim_atomic(struct bkey_i *, struct btree_iter *);
bool bch2_extent_is_atomic(struct bkey_i *, struct btree_iter *);
int bch2_extent_atomic_end(struct btree_trans *, struct btree_iter *,
struct bkey_i *, struct bpos *);
int bch2_extent_trim_atomic(struct bkey_i *, struct btree_iter *);
int bch2_extent_is_atomic(struct bkey_i *, struct btree_iter *);
enum btree_insert_ret
bch2_extent_can_insert(struct btree_trans *, struct btree_insert_entry *,

View file

@@ -310,7 +310,9 @@ int bch2_extent_update(struct btree_trans *trans,
if (ret)
return ret;
bch2_extent_trim_atomic(k, extent_iter);
ret = bch2_extent_trim_atomic(k, extent_iter);
if (ret)
return ret;
ret = sum_sector_overwrites(trans, extent_iter,
k, &allocating,
@@ -2634,7 +2636,9 @@ static long bch2_fcollapse(struct bch_inode_info *inode,
bch2_cut_front(src->pos, &copy.k);
copy.k.k.p.offset -= len >> 9;
bch2_extent_trim_atomic(&copy.k, dst);
ret = bch2_extent_trim_atomic(&copy.k, dst);
if (ret)
goto bkey_err;
BUG_ON(bkey_cmp(dst->pos, bkey_start_pos(&copy.k.k)));

View file

@@ -274,6 +274,8 @@ int bch2_write_index_default(struct bch_write_op *op)
bch2_verify_keylist_sorted(keys);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
retry:
bch2_trans_begin(&trans);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
bkey_start_pos(&bch2_keylist_front(keys)->k),
@@ -284,7 +286,9 @@ int bch2_write_index_default(struct bch_write_op *op)
bkey_copy(&split.k, bch2_keylist_front(keys));
bch2_extent_trim_atomic(&split.k, iter);
ret = bch2_extent_trim_atomic(&split.k, iter);
if (ret)
break;
bch2_trans_update(&trans,
BTREE_INSERT_ENTRY(iter, &split.k));
@@ -301,6 +305,11 @@ int bch2_write_index_default(struct bch_write_op *op)
bch2_keylist_pop_front(keys);
} while (!bch2_keylist_empty(keys));
if (ret == -EINTR) {
ret = 0;
goto retry;
}
bch2_trans_exit(&trans);
return ret;

View file

@@ -247,6 +247,7 @@ static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
struct disk_reservation disk_res =
bch2_disk_reservation_init(c, 0);
struct bkey_i *split;
struct bpos atomic_end;
bool split_compressed = false;
int ret;
@@ -273,9 +274,14 @@ static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
if (ret)
goto err;
ret = bch2_extent_atomic_end(&trans, split_iter,
k, &atomic_end);
if (ret)
goto err;
if (!split_compressed &&
bch2_extent_is_compressed(bkey_i_to_s_c(k)) &&
!bch2_extent_is_atomic(k, split_iter)) {
bkey_cmp(atomic_end, k->k.p) < 0) {
ret = bch2_disk_reservation_add(c, &disk_res,
k->k.size *
bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(k)),
@@ -287,7 +293,7 @@ static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
bkey_copy(split, k);
bch2_cut_front(split_iter->pos, split);
bch2_extent_trim_atomic(split, split_iter);
bch2_cut_back(atomic_end, &split->k);
bch2_trans_update(&trans, BTREE_INSERT_ENTRY(split_iter, split));
bch2_btree_iter_set_pos(iter, split->k.p);