bcachefs: kill struct btree_insert

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Author:    Kent Overstreet
Date:      2019-03-13 22:44:04 -04:00
Committer: Kent Overstreet
parent 0564b16782
commit 0dc17247f1
7 changed files with 47 additions and 54 deletions
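In outline: the per-commit state that the transient struct btree_insert used to carry (the journal reservation and pre-reservation, the disk reservation, the journal_seq pointer and the flags) now lives directly on struct btree_trans, so the commit path no longer builds a second carrier struct on the stack. The sketch below models the before/after shape; the field names follow the diff, but the surrounding definitions are simplified stand-ins, not the kernel ones.

/* Illustrative stand-ins only -- not the kernel definitions. */
typedef unsigned long long u64;
struct bch_fs;
struct btree_iter;
struct btree_insert_entry;
struct journal_res      { u64 seq; };
struct journal_preres   { unsigned u64s; };
struct disk_reservation { u64 sectors; };

/* Before: bch2_trans_commit() packaged its arguments into this on-stack struct. */
struct btree_insert {
	struct bch_fs			*c;
	struct disk_reservation		*disk_res;
	struct journal_res		journal_res;
	struct journal_preres		journal_preres;
	u64				*journal_seq;
	unsigned			flags;
	unsigned short			nr;
	struct btree_insert_entry	*entries;
};

/* After: the same state is kept on the transaction itself. */
struct btree_trans {
	struct bch_fs			*c;
	struct btree_iter		*iters;
	struct btree_insert_entry	*updates;
	unsigned			nr_updates;

	/* update path: */
	struct disk_reservation		*disk_res;
	struct journal_res		journal_res;
	struct journal_preres		journal_preres;
	u64				*journal_seq;
	unsigned			flags;
};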


@@ -289,6 +289,13 @@ struct btree_trans {
struct btree_iter *iters;
struct btree_insert_entry *updates;
+struct disk_reservation *disk_res;
+/* update path: */
+struct journal_res journal_res;
+struct journal_preres journal_preres;
+u64 *journal_seq;
+unsigned flags;
struct btree_iter iters_onstack[2];
struct btree_insert_entry updates_onstack[6];


@@ -7,13 +7,12 @@
struct bch_fs;
struct btree;
-struct btree_insert;
void bch2_btree_node_lock_for_insert(struct bch_fs *, struct btree *,
struct btree_iter *);
bool bch2_btree_bset_insert_key(struct btree_iter *, struct btree *,
struct btree_node_iter *, struct bkey_i *);
-void bch2_btree_journal_key(struct btree_insert *trans, struct btree_iter *,
+void bch2_btree_journal_key(struct btree_trans *, struct btree_iter *,
struct bkey_i *);
void bch2_deferred_update_free(struct bch_fs *,
@@ -21,18 +20,6 @@ void bch2_deferred_update_free(struct bch_fs *,
struct deferred_update *
bch2_deferred_update_alloc(struct bch_fs *, enum btree_id, unsigned);
-struct btree_insert {
-struct bch_fs *c;
-struct disk_reservation *disk_res;
-struct journal_res journal_res;
-struct journal_preres journal_preres;
-u64 *journal_seq;
-unsigned flags;
-unsigned short nr;
-struct btree_insert_entry *entries;
-};
#define BTREE_INSERT_ENTRY(_iter, _k) \
((struct btree_insert_entry) { \
.iter = (_iter), \


@@ -18,8 +18,8 @@
#include <linux/sort.h>
-static bool btree_trans_relock(struct btree_insert *);
-static void btree_trans_unlock(struct btree_insert *);
+static bool btree_trans_relock(struct btree_trans *);
+static void btree_trans_unlock(struct btree_trans *);
/* Inserting into a given leaf node (last stage of insert): */
@@ -130,7 +130,7 @@ static void btree_node_flush1(struct journal *j, struct journal_entry_pin *pin,
return __btree_node_flush(j, pin, 1, seq);
}
-static inline void __btree_journal_key(struct btree_insert *trans,
+static inline void __btree_journal_key(struct btree_trans *trans,
enum btree_id btree_id,
struct bkey_i *insert)
{
@@ -151,7 +151,7 @@ static inline void __btree_journal_key(struct btree_insert *trans,
*trans->journal_seq = seq;
}
-void bch2_btree_journal_key(struct btree_insert *trans,
+void bch2_btree_journal_key(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_i *insert)
{
@@ -185,7 +185,7 @@ void bch2_btree_journal_key(struct btree_insert *trans,
set_btree_node_dirty(b);
}
-static void bch2_insert_fixup_key(struct btree_insert *trans,
+static void bch2_insert_fixup_key(struct btree_trans *trans,
struct btree_insert_entry *insert)
{
struct btree_iter *iter = insert->iter;
@@ -203,7 +203,7 @@ static void bch2_insert_fixup_key(struct btree_insert *trans,
/**
* btree_insert_key - insert a key one key into a leaf node
*/
-static void btree_insert_key_leaf(struct btree_insert *trans,
+static void btree_insert_key_leaf(struct btree_trans *trans,
struct btree_insert_entry *insert)
{
struct bch_fs *c = trans->c;
@@ -286,7 +286,7 @@ static void deferred_update_flush(struct journal *j,
kfree(k);
}
-static void btree_insert_key_deferred(struct btree_insert *trans,
+static void btree_insert_key_deferred(struct btree_trans *trans,
struct btree_insert_entry *insert)
{
struct bch_fs *c = trans->c;
@@ -356,24 +356,24 @@ bch2_deferred_update_alloc(struct bch_fs *c,
* We sort transaction entries so that if multiple iterators point to the same
* leaf node they'll be adjacent:
*/
-static bool same_leaf_as_prev(struct btree_insert *trans,
+static bool same_leaf_as_prev(struct btree_trans *trans,
struct btree_insert_entry *i)
{
-return i != trans->entries &&
+return i != trans->updates &&
!i->deferred &&
i[0].iter->l[0].b == i[-1].iter->l[0].b;
}
#define __trans_next_entry(_trans, _i, _filter) \
({ \
-while ((_i) < (_trans)->entries + (_trans->nr) && !(_filter)) \
+while ((_i) < (_trans)->updates + (_trans->nr_updates) && !(_filter))\
(_i)++; \
\
-(_i) < (_trans)->entries + (_trans->nr); \
+(_i) < (_trans)->updates + (_trans->nr_updates); \
})
#define __trans_for_each_entry(_trans, _i, _filter) \
-for ((_i) = (_trans)->entries; \
+for ((_i) = (_trans)->updates; \
__trans_next_entry(_trans, _i, _filter); \
(_i)++)
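The entry-iteration macros above walk trans->updates and use a GNU statement expression to skip entries that fail a filter. As a quick illustration of the idiom (a toy program, not kernel code: the struct definitions and the filter are simplified stand-ins; the macro bodies mirror the renamed versions in the diff), this visits only the non-deferred entries:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the transaction and its update entries. */
struct entry { bool deferred; int key; };
struct trans { struct entry *updates; unsigned nr_updates; };

/*
 * Same shape as the kernel macros: advance _i past entries failing
 * _filter, then report whether _i still points at a valid entry.
 * (Statement expressions are a GCC/Clang extension, as in the kernel.)
 */
#define __trans_next_entry(_trans, _i, _filter)				\
({									\
	while ((_i) < (_trans)->updates + (_trans)->nr_updates && !(_filter))\
		(_i)++;							\
									\
	(_i) < (_trans)->updates + (_trans)->nr_updates;		\
})

#define __trans_for_each_entry(_trans, _i, _filter)			\
	for ((_i) = (_trans)->updates;					\
	     __trans_next_entry(_trans, _i, _filter);			\
	     (_i)++)

int main(void)
{
	struct entry e[] = { { false, 1 }, { true, 2 }, { false, 3 } };
	struct trans t = { .updates = e, .nr_updates = 3 };
	struct entry *i;

	/* Prints keys 1 and 3; the deferred entry is skipped by the filter. */
	__trans_for_each_entry(&t, i, !i->deferred)
		printf("key %d\n", i->key);

	return 0;
}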
@@ -404,7 +404,7 @@ inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
bch2_btree_init_next(c, b, iter);
}
-static void multi_lock_write(struct bch_fs *c, struct btree_insert *trans)
+static void multi_lock_write(struct bch_fs *c, struct btree_trans *trans)
{
struct btree_insert_entry *i;
@@ -412,7 +412,7 @@ static void multi_lock_write(struct bch_fs *c, struct btree_insert *trans)
bch2_btree_node_lock_for_insert(c, i->iter->l[0].b, i->iter);
}
-static void multi_unlock_write(struct btree_insert *trans)
+static void multi_unlock_write(struct btree_trans *trans)
{
struct btree_insert_entry *i;
@@ -427,7 +427,7 @@ static inline int btree_trans_cmp(struct btree_insert_entry l,
btree_iter_cmp(l.iter, r.iter);
}
-static bool btree_trans_relock(struct btree_insert *trans)
+static bool btree_trans_relock(struct btree_trans *trans)
{
struct btree_insert_entry *i;
@@ -436,7 +436,7 @@ static bool btree_trans_relock(struct btree_insert *trans)
return true;
}
-static void btree_trans_unlock(struct btree_insert *trans)
+static void btree_trans_unlock(struct btree_trans *trans)
{
struct btree_insert_entry *i;
@@ -449,7 +449,7 @@ static void btree_trans_unlock(struct btree_insert *trans)
/* Normal update interface: */
static enum btree_insert_ret
-btree_key_can_insert(struct btree_insert *trans,
+btree_key_can_insert(struct btree_trans *trans,
struct btree_insert_entry *insert,
unsigned *u64s)
{
@@ -477,7 +477,7 @@ btree_key_can_insert(struct btree_insert *trans,
return BTREE_INSERT_OK;
}
-static inline void do_btree_insert_one(struct btree_insert *trans,
+static inline void do_btree_insert_one(struct btree_trans *trans,
struct btree_insert_entry *insert)
{
if (likely(!insert->deferred))
@@ -489,7 +489,7 @@ static inline void do_btree_insert_one(struct btree_insert *trans,
/*
* Get journal reservation, take write locks, and attempt to do btree update(s):
*/
-static inline int do_btree_insert_at(struct btree_insert *trans,
+static inline int do_btree_insert_at(struct btree_trans *trans,
struct btree_insert_entry **stopped_at)
{
struct bch_fs *c = trans->c;
@@ -631,7 +631,7 @@ static inline void btree_insert_entry_checks(struct bch_fs *c,
* -EROFS: filesystem read only
* -EIO: journal or btree node IO error
*/
-static int __bch2_btree_insert_at(struct btree_insert *trans)
+static int __bch2_btree_insert_at(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
@@ -639,17 +639,17 @@ static int __bch2_btree_insert_at(struct btree_trans *trans)
unsigned flags, u64s = 0;
int ret;
-BUG_ON(!trans->nr);
+BUG_ON(!trans->nr_updates);
/* for the sake of sanity: */
-BUG_ON(trans->nr > 1 && !(trans->flags & BTREE_INSERT_ATOMIC));
+BUG_ON(trans->nr_updates > 1 && !(trans->flags & BTREE_INSERT_ATOMIC));
if (trans->flags & BTREE_INSERT_GC_LOCK_HELD)
lockdep_assert_held(&c->gc_lock);
memset(&trans->journal_preres, 0, sizeof(trans->journal_preres));
-bubble_sort(trans->entries, trans->nr, btree_trans_cmp);
+bubble_sort(trans->updates, trans->nr_updates, btree_trans_cmp);
trans_for_each_entry(trans, i)
btree_insert_entry_checks(c, i);
@@ -781,7 +781,7 @@ static int __bch2_btree_insert_at(struct btree_insert *trans)
goto out;
}
-bch2_btree_iter_unlock(trans->entries[0].iter);
+bch2_trans_unlock(trans);
ret = -EINTR;
trans_for_each_iter(trans, i) {
@@ -830,21 +830,20 @@ int bch2_trans_commit(struct btree_trans *trans,
u64 *journal_seq,
unsigned flags)
{
-struct btree_insert insert = {
-.c = trans->c,
-.disk_res = disk_res,
-.journal_seq = journal_seq,
-.flags = flags,
-.nr = trans->nr_updates,
-.entries = trans->updates,
-};
+int ret;
if (!trans->nr_updates)
return 0;
+trans->disk_res = disk_res;
+trans->journal_seq = journal_seq;
+trans->flags = flags;
+ret = __bch2_btree_insert_at(trans);
trans->nr_updates = 0;
-return __bch2_btree_insert_at(&insert);
+return ret;
}
int bch2_btree_delete_at(struct btree_trans *trans,
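One behavioural detail in bch2_trans_commit() above: the trans->nr_updates reset has to happen after the call now, because __bch2_btree_insert_at() reads trans->nr_updates directly instead of a count captured in a separate struct. A minimal stand-alone model of that control flow (stand-in types and a stubbed worker; only the shape follows the diff):

#include <stdio.h>

/* Stand-in types -- illustrative only. */
typedef unsigned long long u64;
struct disk_reservation { u64 sectors; };
struct trans {
	unsigned		nr_updates;
	struct disk_reservation	*disk_res;
	u64			*journal_seq;
	unsigned		flags;
};

/* Stub standing in for __bch2_btree_insert_at(): it reads the
 * per-commit state straight off the transaction. */
static int insert_at(struct trans *trans)
{
	printf("committing %u update(s), flags 0x%x\n",
	       trans->nr_updates, trans->flags);
	return 0;
}

static int trans_commit(struct trans *trans,
			struct disk_reservation *disk_res,
			u64 *journal_seq,
			unsigned flags)
{
	int ret;

	if (!trans->nr_updates)
		return 0;

	/* Stash the per-commit arguments on the transaction... */
	trans->disk_res    = disk_res;
	trans->journal_seq = journal_seq;
	trans->flags       = flags;

	/* ...run the worker, and only then clear nr_updates. */
	ret = insert_at(trans);

	trans->nr_updates = 0;

	return ret;
}

int main(void)
{
	struct trans t = { .nr_updates = 2 };

	return trans_commit(&t, NULL, NULL, 0x1);
}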


@@ -975,7 +975,7 @@ int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
return ret;
}
-void bch2_mark_update(struct btree_insert *trans,
+void bch2_mark_update(struct btree_trans *trans,
struct btree_insert_entry *insert)
{
struct bch_fs *c = trans->c;


@@ -256,7 +256,7 @@ int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c,
int bch2_mark_key(struct bch_fs *, struct bkey_s_c,
bool, s64, struct gc_pos,
struct bch_fs_usage *, u64, unsigned);
-void bch2_mark_update(struct btree_insert *, struct btree_insert_entry *);
+void bch2_mark_update(struct btree_trans *, struct btree_insert_entry *);
int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
struct disk_reservation *);


@@ -890,7 +890,7 @@ bool bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
}
enum btree_insert_ret
-bch2_extent_can_insert(struct btree_insert *trans,
+bch2_extent_can_insert(struct btree_trans *trans,
struct btree_insert_entry *insert,
unsigned *u64s)
{
@@ -1164,7 +1164,7 @@ static void __bch2_insert_fixup_extent(struct bch_fs *c,
* If the end of iter->pos is not the same as the end of insert, then
* key insertion needs to continue/be retried.
*/
-void bch2_insert_fixup_extent(struct btree_insert *trans,
+void bch2_insert_fixup_extent(struct btree_trans *trans,
struct btree_insert_entry *insert)
{
struct bch_fs *c = trans->c;


@@ -7,7 +7,7 @@
#include "extents_types.h"
struct bch_fs;
-struct btree_insert;
+struct btree_trans;
struct btree_insert_entry;
/* extent entries: */
@@ -410,9 +410,9 @@ void bch2_extent_trim_atomic(struct bkey_i *, struct btree_iter *);
bool bch2_extent_is_atomic(struct bkey_i *, struct btree_iter *);
enum btree_insert_ret
-bch2_extent_can_insert(struct btree_insert *, struct btree_insert_entry *,
+bch2_extent_can_insert(struct btree_trans *, struct btree_insert_entry *,
unsigned *);
-void bch2_insert_fixup_extent(struct btree_insert *,
+void bch2_insert_fixup_extent(struct btree_trans *,
struct btree_insert_entry *);
void bch2_extent_mark_replicas_cached(struct bch_fs *, struct bkey_s_extent,