bcachefs: Tracepoint improvements

Our types are exported to the tracepoint code, so it's not necessary to
break things out individually when passing them to tracepoints - we can
also call other functions from TP_fast_assign().
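
For example (a minimal illustrative sketch, not code from this patch - the
event name "trans_restart_example" is made up; the fields mirror the
transaction_event class below), a tracepoint can now take the whole
btree_trans and pull what it needs out of it, including via helper calls,
inside TP_fast_assign():

	TRACE_EVENT(trans_restart_example,
		TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
		TP_ARGS(trans, caller_ip),

		TP_STRUCT__entry(
			__array(char,		trans_fn, 24	)
			__field(unsigned long,	caller_ip	)
		),

		TP_fast_assign(
			/* member access and function calls both work here: */
			strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
			__entry->caller_ip = caller_ip;
		),

		TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
	);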

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>

Author: Kent Overstreet, 2022-08-10 12:42:55 -04:00 (committed by Kent Overstreet)
parent c7be3cb546
commit 9f96568c0a
9 changed files with 213 additions and 331 deletions


@@ -691,8 +691,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	 * been freed:
 	 */
 	if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
-		trace_trans_restart_relock_parent_for_fill(trans->fn,
-					_THIS_IP_, btree_id, &path->pos);
+		trace_trans_restart_relock_parent_for_fill(trans, _THIS_IP_, path);
 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
 	}
@@ -700,9 +699,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	if (trans && b == ERR_PTR(-ENOMEM)) {
 		trans->memory_allocation_failure = true;
 
-		trace_trans_restart_memory_allocation_failure(trans->fn,
-				_THIS_IP_, btree_id, &path->pos);
+		trace_trans_restart_memory_allocation_failure(trans, _THIS_IP_, path);
 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
 	}
@@ -750,8 +747,8 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	}
 
 	if (!six_relock_type(&b->c.lock, lock_type, seq)) {
-		trace_trans_restart_relock_after_fill(trans->fn, _THIS_IP_,
-						      btree_id, &path->pos);
+		if (trans)
+			trace_trans_restart_relock_after_fill(trans, _THIS_IP_, path);
 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
 	}
@@ -906,10 +903,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
 			if (bch2_btree_node_relock(trans, path, level + 1))
 				goto retry;
 
-			trace_trans_restart_btree_node_reused(trans->fn,
-							      trace_ip,
-							      path->btree_id,
-							      &path->pos);
+			trace_trans_restart_btree_node_reused(trans, trace_ip, path);
 			return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
 		}
 	}


@@ -88,11 +88,6 @@ static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos
 	return p;
 }
 
-static inline bool is_btree_node(struct btree_path *path, unsigned l)
-{
-	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
-}
-
 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
 {
 	struct bpos pos = iter->pos;
@@ -195,12 +190,7 @@ bool __bch2_btree_node_relock(struct btree_trans *trans,
 fail:
 	if (b != ERR_PTR(-BCH_ERR_no_btree_node_cached) &&
 	    b != ERR_PTR(-BCH_ERR_no_btree_node_init))
-		trace_btree_node_relock_fail(trans->fn, _RET_IP_,
-					     path->btree_id,
-					     &path->pos,
-					     (unsigned long) b,
-					     path->l[level].lock_seq,
-					     is_btree_node(path, level) ? b->c.lock.state.seq : 0);
+		trace_btree_node_relock_fail(trans, _RET_IP_, path, level);
 	return false;
 }
@@ -240,12 +230,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
 		goto success;
 	}
 
-	trace_btree_node_upgrade_fail(trans->fn, _RET_IP_,
-				      path->btree_id,
-				      &path->pos,
-				      btree_node_locked(path, level),
-				      bch2_btree_node_lock_counts(trans, NULL, b, level),
-				      six_lock_counts(&b->c.lock));
+	trace_btree_node_upgrade_fail(trans, _RET_IP_, path, level);
 	return false;
 success:
 	mark_btree_node_intent_locked(trans, path, level);
@@ -381,14 +366,7 @@ int __bch2_btree_node_lock(struct btree_trans *trans,
 	return btree_node_lock_type(trans, path, b, pos, level,
 				    type, should_sleep_fn, p);
 deadlock:
-	trace_trans_restart_would_deadlock(trans->fn, ip,
-			trans->in_traverse_all, reason,
-			linked->btree_id,
-			linked->cached,
-			&linked->pos,
-			path->btree_id,
-			path->cached,
-			&pos);
+	trace_trans_restart_would_deadlock(trans, ip, reason, linked, path, &pos);
 	return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
 }
@@ -438,8 +416,7 @@ int bch2_btree_path_relock_intent(struct btree_trans *trans,
 		if (!bch2_btree_node_relock(trans, path, l)) {
 			__bch2_btree_path_unlock(trans, path);
 			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
-			trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_,
-						path->btree_id, &path->pos);
+			trace_trans_restart_relock_path_intent(trans, _RET_IP_, path);
 			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
 		}
 	}
@@ -454,8 +431,7 @@ static int __bch2_btree_path_relock(struct btree_trans *trans,
 	bool ret = btree_path_get_locks(trans, path, false);
 
 	if (!ret) {
-		trace_trans_restart_relock_path(trans->fn, trace_ip,
-						path->btree_id, &path->pos);
+		trace_trans_restart_relock_path(trans, trace_ip, path);
 		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
 	}
@@ -561,8 +537,7 @@ int bch2_trans_relock(struct btree_trans *trans)
 	trans_for_each_path(trans, path)
 		if (path->should_be_locked &&
 		    bch2_btree_path_relock(trans, path, _RET_IP_)) {
-			trace_trans_restart_relock(trans->fn, _RET_IP_,
-					path->btree_id, &path->pos);
+			trace_trans_restart_relock(trans, _RET_IP_, path);
 			BUG_ON(!trans->restarted);
 			return -BCH_ERR_transaction_restart_relock;
 		}
@@ -1529,7 +1504,7 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
 	trans->in_traverse_all = false;
 
-	trace_trans_traverse_all(trans->fn, trace_ip);
+	trace_trans_traverse_all(trans, trace_ip);
 	return ret;
 }
@@ -1666,7 +1641,7 @@ int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
 		u64 max = ~(~0ULL << restart_probability_bits);
 
 		if (!get_random_u32_below(max)) {
-			trace_transaction_restart_injected(trans->fn, _RET_IP_);
+			trace_transaction_restart_injected(trans, _RET_IP_);
 			return btree_trans_restart(trans, BCH_ERR_transaction_restart_fault_inject);
 		}
 	}
@@ -1798,7 +1773,6 @@ static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btr
 static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
 {
-	trace_btree_path_free(trans->fn, _RET_IP_, path->btree_id, &path->pos);
 	__bch2_btree_path_unlock(trans, path);
 	btree_path_list_remove(trans, path);
 	trans->paths_allocated &= ~(1ULL << path->idx);
@@ -1891,10 +1865,10 @@ void bch2_dump_trans_paths_updates(struct btree_trans *trans)
 		bch2_bpos_to_text(&buf, path->pos);
 
-		printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree=%s l=%u pos %s locks %u %pS\n",
+		printk(KERN_ERR "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos %s locks %u %pS\n",
 		       path->idx, path->ref, path->intent_ref,
-		       path->should_be_locked ? " S" : "",
-		       path->preserve ? " P" : "",
+		       path->preserve ? 'P' : ' ',
+		       path->should_be_locked ? 'S' : ' ',
 		       bch2_btree_ids[path->btree_id],
 		       path->level,
 		       buf.buf,
@@ -1974,8 +1948,6 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
 		__btree_path_get(path_pos, intent);
 		path = bch2_btree_path_set_pos(trans, path_pos, pos, intent);
 	} else {
-		trace_btree_path_alloc(trans->fn, _RET_IP_, btree_id, &pos, locks_want);
-
 		path = btree_path_alloc(trans, path_pos);
 		path_pos = NULL;
@@ -2150,8 +2122,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 		path->l[path->level].b		= ERR_PTR(-BCH_ERR_no_btree_node_relock);
 		path->l[path->level + 1].b	= ERR_PTR(-BCH_ERR_no_btree_node_relock);
 		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
-		trace_trans_restart_relock_next_node(trans->fn, _THIS_IP_,
-				   path->btree_id, &path->pos);
+		trace_trans_restart_relock_next_node(trans, _THIS_IP_, path);
 		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
 		goto err;
 	}
@@ -3185,7 +3156,7 @@ void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
 		trans->mem_bytes = new_bytes;
 
 		if (old_bytes) {
-			trace_trans_restart_mem_realloced(trans->fn, _RET_IP_, new_bytes);
+			trace_trans_restart_mem_realloced(trans, _RET_IP_, new_bytes);
 			return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
 		}
 	}


@@ -405,7 +405,7 @@ static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *
 static inline int btree_trans_too_many_iters(struct btree_trans *trans)
 {
 	if (hweight64(trans->paths_allocated) > BTREE_ITER_MAX / 2) {
-		trace_trans_restart_too_many_iters(trans->fn, _THIS_IP_);
+		trace_trans_restart_too_many_iters(trans, _THIS_IP_);
 		return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
 	}


@@ -291,8 +291,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 	k = bch2_btree_path_peek_slot(path, &u);
 
 	if (!bch2_btree_node_relock(trans, ck_path, 0)) {
-		trace_trans_restart_relock_key_cache_fill(trans->fn,
-				_THIS_IP_, ck_path->btree_id, &ck_path->pos);
+		trace_trans_restart_relock_key_cache_fill(trans, _THIS_IP_, ck_path);
 		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
 		goto err;
 	}
@@ -420,7 +419,7 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
 		 */
 		if (!path->locks_want &&
 		    !__bch2_btree_path_upgrade(trans, path, 1)) {
-			trace_transaction_restart_key_cache_upgrade(trans->fn, _THIS_IP_);
+			trace_transaction_restart_key_cache_upgrade(trans, _THIS_IP_);
 			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_upgrade);
 			goto err;
 		}


@@ -13,6 +13,11 @@
 #include "btree_iter.h"
 #include "six.h"
 
+static inline bool is_btree_node(struct btree_path *path, unsigned l)
+{
+	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
+}
+
 /* matches six lock types */
 enum btree_node_locked_type {
 	BTREE_NODE_UNLOCKED		= -1,
@@ -306,4 +311,7 @@ static inline void btree_path_set_level_up(struct btree_trans *trans,
 	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
 }
 
+struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
+						  struct btree_path *, struct btree *, unsigned);
+
 #endif /* _BCACHEFS_BTREE_LOCKING_H */


@@ -994,8 +994,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 		nr_nodes[1] += 1;
 
 	if (!bch2_btree_path_upgrade(trans, path, U8_MAX)) {
-		trace_trans_restart_iter_upgrade(trans->fn, _RET_IP_,
-						 path->btree_id, &path->pos);
+		trace_trans_restart_iter_upgrade(trans, _RET_IP_, path);
 		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
 		return ERR_PTR(ret);
 	}
@@ -1053,7 +1052,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 					      BTREE_UPDATE_JOURNAL_RES,
 					      journal_flags);
 		if (ret) {
-			trace_trans_restart_journal_preres_get(trans->fn, _RET_IP_);
+			trace_trans_restart_journal_preres_get(trans, _RET_IP_);
 			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_journal_preres_get);
 			goto err;
 		}


@@ -285,7 +285,7 @@ bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s,
 	ret = bch2_trans_relock(trans);
 	if (ret) {
-		trace_trans_restart_journal_preres_get(trans->fn, trace_ip);
+		trace_trans_restart_journal_preres_get(trans, trace_ip);
 		return ret;
 	}
@@ -375,9 +375,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 	 * Keys returned by peek() are no longer valid pointers, so we need a
 	 * transaction restart:
 	 */
-	trace_trans_restart_key_cache_key_realloced(trans->fn, _RET_IP_,
-						    path->btree_id, &path->pos,
-						    old_u64s, new_u64s);
+	trace_trans_restart_key_cache_key_realloced(trans, _RET_IP_, path, old_u64s, new_u64s);
 	return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_key_cache_realloced);
 }
@@ -569,7 +567,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
 	int ret;
 
 	if (race_fault()) {
-		trace_trans_restart_fault_inject(trans->fn, trace_ip);
+		trace_trans_restart_fault_inject(trans, trace_ip);
 		return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
 	}
@@ -837,7 +835,7 @@ static inline int trans_lock_write(struct btree_trans *trans)
 			bch2_btree_node_unlock_write_inlined(trans, i->path, insert_l(i)->b);
 	}
 
-	trace_trans_restart_would_deadlock_write(trans->fn);
+	trace_trans_restart_would_deadlock_write(trans);
 	return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
 }
@@ -970,8 +968,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 	case BTREE_INSERT_BTREE_NODE_FULL:
 		ret = bch2_btree_split_leaf(trans, i->path, trans->flags);
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-			trace_trans_restart_btree_node_split(trans->fn, trace_ip,
-							     i->btree_id, &i->path->pos);
+			trace_trans_restart_btree_node_split(trans, trace_ip, i->path);
 		break;
 	case BTREE_INSERT_NEED_MARK_REPLICAS:
 		bch2_trans_unlock(trans);
@@ -982,7 +979,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 		ret = bch2_trans_relock(trans);
 		if (ret)
-			trace_trans_restart_mark_replicas(trans->fn, trace_ip);
+			trace_trans_restart_mark_replicas(trans, trace_ip);
 		break;
 	case BTREE_INSERT_NEED_JOURNAL_RES:
 		bch2_trans_unlock(trans);
@@ -999,12 +996,12 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 		ret = bch2_trans_relock(trans);
 		if (ret)
-			trace_trans_restart_journal_res_get(trans->fn, trace_ip);
+			trace_trans_restart_journal_res_get(trans, trace_ip);
 		break;
 	case BTREE_INSERT_NEED_JOURNAL_RECLAIM:
 		bch2_trans_unlock(trans);
 
-		trace_trans_blocked_journal_reclaim(trans->fn, trace_ip);
+		trace_trans_blocked_journal_reclaim(trans, trace_ip);
 		wait_event_freezable(c->journal.reclaim_wait,
 				     (ret = journal_reclaim_wait_done(c)));
@@ -1013,7 +1010,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 		ret = bch2_trans_relock(trans);
 		if (ret)
-			trace_trans_restart_journal_reclaim(trans->fn, trace_ip);
+			trace_trans_restart_journal_reclaim(trans, trace_ip);
 		break;
 	default:
 		BUG_ON(ret >= 0);
@@ -1116,8 +1113,7 @@ int __bch2_trans_commit(struct btree_trans *trans)
 		BUG_ON(!i->path->should_be_locked);
 
 		if (unlikely(!bch2_btree_path_upgrade(trans, i->path, i->level + 1))) {
-			trace_trans_restart_upgrade(trans->fn, _RET_IP_,
-						    i->btree_id, &i->path->pos);
+			trace_trans_restart_upgrade(trans, _RET_IP_, i->path);
 			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
 			goto out;
 		}
@@ -1163,7 +1159,7 @@ int __bch2_trans_commit(struct btree_trans *trans)
 	if (ret)
 		goto err;
 
-	trace_transaction_commit(trans->fn, _RET_IP_);
+	trace_transaction_commit(trans, _RET_IP_);
 out:
 	bch2_journal_preres_put(&c->journal, &trans->journal_preres);
@@ -1639,7 +1635,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 		ck = (void *) iter->key_cache_path->l[0].b;
 
 		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
-			trace_trans_restart_key_cache_raced(trans->fn, _RET_IP_);
+			trace_trans_restart_key_cache_raced(trans, _RET_IP_);
 			return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
 		}


@@ -2,8 +2,10 @@
 #include "bcachefs.h"
 #include "alloc_types.h"
 #include "buckets.h"
-#include "btree_types.h"
+#include "btree_iter.h"
+#include "btree_locking.h"
 #include "keylist.h"
+#include "opts.h"
 #include "six.h"
 
 #include <linux/blktrace_api.h>


@@ -7,21 +7,29 @@
 #include <linux/tracepoint.h>
 
+#define TRACE_BPOS_entries(name)				\
+	__field(u64,			name##_inode	)	\
+	__field(u64,			name##_offset	)	\
+	__field(u32,			name##_snapshot	)
+
+#define TRACE_BPOS_assign(dst, src)				\
+	__entry->dst##_inode		= (src).inode;		\
+	__entry->dst##_offset		= (src).offset;		\
+	__entry->dst##_snapshot		= (src).snapshot
+
 DECLARE_EVENT_CLASS(bpos,
 	TP_PROTO(struct bpos *p),
 	TP_ARGS(p),
 
 	TP_STRUCT__entry(
-		__field(u64,			inode		)
-		__field(u64,			offset		)
+		TRACE_BPOS_entries(p)
 	),
 
 	TP_fast_assign(
-		__entry->inode	= p->inode;
-		__entry->offset	= p->offset;
+		TRACE_BPOS_assign(p, *p);
 	),
 
-	TP_printk("%llu:%llu", __entry->inode, __entry->offset)
+	TP_printk("%llu:%llu:%u", __entry->p_inode, __entry->p_offset, __entry->p_snapshot)
 );
 
 DECLARE_EVENT_CLASS(bkey,
@@ -230,23 +238,22 @@ DECLARE_EVENT_CLASS(btree_node,
 	TP_STRUCT__entry(
 		__field(dev_t,			dev		)
 		__field(u8,			level		)
-		__field(u8,			id		)
-		__field(u64,			inode		)
-		__field(u64,			offset		)
+		__field(u8,			btree_id	)
+		TRACE_BPOS_entries(pos)
 	),
 
 	TP_fast_assign(
 		__entry->dev		= c->dev;
 		__entry->level		= b->c.level;
-		__entry->id		= b->c.btree_id;
-		__entry->inode		= b->key.k.p.inode;
-		__entry->offset		= b->key.k.p.offset;
+		__entry->btree_id	= b->c.btree_id;
+		TRACE_BPOS_assign(pos, b->key.k.p);
 	),
 
-	TP_printk("%d,%d %u id %u %llu:%llu",
+	TP_printk("%d,%d %u %s %llu:%llu:%u",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->level, __entry->id,
-		  __entry->inode, __entry->offset)
+		  __entry->level,
+		  bch2_btree_ids[__entry->btree_id],
+		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
 );
 
 DEFINE_EVENT(btree_node, btree_read,
@@ -379,43 +386,36 @@ TRACE_EVENT(btree_cache_scan,
 );
 
 TRACE_EVENT(btree_node_relock_fail,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos,
-		 unsigned long node,
-		 u32 iter_lock_seq,
-		 u32 node_lock_seq),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos, node, iter_lock_seq, node_lock_seq),
+		 struct btree_path *path,
+		 unsigned level),
+	TP_ARGS(trans, caller_ip, path, level),
 
 	TP_STRUCT__entry(
 		__array(char,			trans_fn, 24	)
 		__field(unsigned long,		caller_ip	)
 		__field(u8,			btree_id	)
-		__field(u64,			pos_inode	)
-		__field(u64,			pos_offset	)
-		__field(u32,			pos_snapshot	)
+		TRACE_BPOS_entries(pos)
 		__field(unsigned long,		node		)
 		__field(u32,			iter_lock_seq	)
 		__field(u32,			node_lock_seq	)
 	),
 
 	TP_fast_assign(
-		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
 		__entry->caller_ip		= caller_ip;
-		__entry->btree_id		= btree_id;
-		__entry->pos_inode		= pos->inode;
-		__entry->pos_offset		= pos->offset;
-		__entry->pos_snapshot		= pos->snapshot;
-		__entry->node			= node;
-		__entry->iter_lock_seq		= iter_lock_seq;
-		__entry->node_lock_seq		= node_lock_seq;
+		__entry->btree_id		= path->btree_id;
+		TRACE_BPOS_assign(pos, path->pos);
+		__entry->node			= (unsigned long) btree_path_node(path, level);
+		__entry->iter_lock_seq		= path->l[level].lock_seq;
+		__entry->node_lock_seq		= is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
 	),
 
-	TP_printk("%s %pS btree %u pos %llu:%llu:%u, node %lu iter seq %u lock seq %u",
+	TP_printk("%s %pS btree %s pos %llu:%llu:%u, node %lu iter seq %u lock seq %u",
 		  __entry->trans_fn,
 		  (void *) __entry->caller_ip,
-		  __entry->btree_id,
+		  bch2_btree_ids[__entry->btree_id],
 		  __entry->pos_inode,
 		  __entry->pos_offset,
 		  __entry->pos_snapshot,
@@ -425,48 +425,45 @@ TRACE_EVENT(btree_node_relock_fail,
 );
 
 TRACE_EVENT(btree_node_upgrade_fail,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos,
-		 bool locked,
-		 struct six_lock_count self_lock_count,
-		 struct six_lock_count lock_count),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos,
-		locked, self_lock_count, lock_count),
+		 struct btree_path *path,
+		 unsigned level),
+	TP_ARGS(trans, caller_ip, path, level),
 
 	TP_STRUCT__entry(
 		__array(char,			trans_fn, 24	)
 		__field(unsigned long,		caller_ip	)
 		__field(u8,			btree_id	)
-		__field(u64,			pos_inode	)
-		__field(u64,			pos_offset	)
-		__field(u32,			pos_snapshot	)
+		TRACE_BPOS_entries(pos)
 		__field(u8,			locked		)
 		__field(u8,			self_read_count	)
-		__field(u8,			read_count	)
 		__field(u8,			self_intent_count)
+		__field(u8,			read_count	)
 		__field(u8,			intent_count	)
 	),
 
 	TP_fast_assign(
-		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		struct six_lock_count c;
+
+		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
 		__entry->caller_ip		= caller_ip;
-		__entry->btree_id		= btree_id;
-		__entry->pos_inode		= pos->inode;
-		__entry->pos_offset		= pos->offset;
-		__entry->pos_snapshot		= pos->snapshot;
-		__entry->locked			= locked;
-		__entry->self_read_count	= self_lock_count.read;
-		__entry->self_intent_count	= self_lock_count.intent;
-		__entry->read_count		= lock_count.read;
-		__entry->intent_count		= lock_count.intent;
+		__entry->btree_id		= path->btree_id;
+		TRACE_BPOS_assign(pos, path->pos);
+		__entry->locked			= btree_node_locked(path, level);
+
+		c = bch2_btree_node_lock_counts(trans, NULL, path->l[level].b, level),
+		__entry->self_read_count	= c.read;
+		__entry->self_intent_count	= c.intent;
+		c = six_lock_counts(&path->l[level].b->c.lock);
+		__entry->read_count		= c.read;
+		__entry->intent_count		= c.intent;
 	),
 
-	TP_printk("%s %pS btree %u pos %llu:%llu:%u, locked %u held %u:%u lock count %u:%u",
+	TP_printk("%s %pS btree %s pos %llu:%llu:%u, locked %u held %u:%u lock count %u:%u",
 		  __entry->trans_fn,
 		  (void *) __entry->caller_ip,
-		  __entry->btree_id,
+		  bch2_btree_ids[__entry->btree_id],
 		  __entry->pos_inode,
 		  __entry->pos_offset,
 		  __entry->pos_snapshot,
@@ -731,9 +728,9 @@ TRACE_EVENT(copygc_wait,
 );
 
 DECLARE_EVENT_CLASS(transaction_event,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
-	TP_ARGS(trans_fn, caller_ip),
+	TP_ARGS(trans, caller_ip),
 
 	TP_STRUCT__entry(
 		__array(char,			trans_fn, 24	)
@@ -741,7 +738,7 @@ DECLARE_EVENT_CLASS(transaction_event,
 	),
 
 	TP_fast_assign(
-		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
 		__entry->caller_ip		= caller_ip;
 	),
 
@@ -749,229 +746,206 @@ DECLARE_EVENT_CLASS(transaction_event,
 );
 
 DEFINE_EVENT(transaction_event, transaction_commit,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
-	TP_ARGS(trans_fn, caller_ip)
+	TP_ARGS(trans, caller_ip)
 );
 
 DEFINE_EVENT(transaction_event, transaction_restart_injected,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
-	TP_ARGS(trans_fn, caller_ip)
+	TP_ARGS(trans, caller_ip)
 );
 
 DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
-	TP_ARGS(trans_fn, caller_ip)
+	TP_ARGS(trans, caller_ip)
 );
 
 DEFINE_EVENT(transaction_event, trans_restart_journal_res_get,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
-	TP_ARGS(trans_fn, caller_ip)
+	TP_ARGS(trans, caller_ip)
 );
 
 DEFINE_EVENT(transaction_event, trans_restart_journal_preres_get,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
-	TP_ARGS(trans_fn, caller_ip)
+	TP_ARGS(trans, caller_ip)
 );
 
 DEFINE_EVENT(transaction_event, trans_restart_journal_reclaim,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
-	TP_ARGS(trans_fn, caller_ip)
+	TP_ARGS(trans, caller_ip)
 );
 
 DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
-	TP_ARGS(trans_fn, caller_ip)
+	TP_ARGS(trans, caller_ip)
 );
 
 DEFINE_EVENT(transaction_event, trans_traverse_all,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
-	TP_ARGS(trans_fn, caller_ip)
+	TP_ARGS(trans, caller_ip)
 );
 
 DEFINE_EVENT(transaction_event, trans_restart_mark_replicas,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
-	TP_ARGS(trans_fn, caller_ip)
+	TP_ARGS(trans, caller_ip)
 );
 
 DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
-	TP_ARGS(trans_fn, caller_ip)
+	TP_ARGS(trans, caller_ip)
 );
 
 DEFINE_EVENT(transaction_event, trans_restart_too_many_iters,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
-	TP_ARGS(trans_fn, caller_ip)
+	TP_ARGS(trans, caller_ip)
 );
 
 DECLARE_EVENT_CLASS(transaction_restart_iter,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos),
+		 struct btree_path *path),
+	TP_ARGS(trans, caller_ip, path),
 
 	TP_STRUCT__entry(
 		__array(char,			trans_fn, 24	)
 		__field(unsigned long,		caller_ip	)
 		__field(u8,			btree_id	)
-		__field(u64,			pos_inode	)
-		__field(u64,			pos_offset	)
-		__field(u32,			pos_snapshot	)
+		TRACE_BPOS_entries(pos)
 	),
 
 	TP_fast_assign(
-		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
 		__entry->caller_ip		= caller_ip;
-		__entry->btree_id		= btree_id;
-		__entry->pos_inode		= pos->inode;
-		__entry->pos_offset		= pos->offset;
-		__entry->pos_snapshot		= pos->snapshot;
+		__entry->btree_id		= path->btree_id;
+		TRACE_BPOS_assign(pos, path->pos)
	),
 
-	TP_printk("%s %pS btree %u pos %llu:%llu:%u",
+	TP_printk("%s %pS btree %s pos %llu:%llu:%u",
 		  __entry->trans_fn,
 		  (void *) __entry->caller_ip,
-		  __entry->btree_id,
+		  bch2_btree_ids[__entry->btree_id],
 		  __entry->pos_inode,
 		  __entry->pos_offset,
 		  __entry->pos_snapshot)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_reused,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+		 struct btree_path *path),
+	TP_ARGS(trans, caller_ip, path)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+		 struct btree_path *path),
+	TP_ARGS(trans, caller_ip, path)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_upgrade,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+		 struct btree_path *path),
+	TP_ARGS(trans, caller_ip, path)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_iter_upgrade,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+		 struct btree_path *path),
+	TP_ARGS(trans, caller_ip, path)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+		 struct btree_path *path),
+	TP_ARGS(trans, caller_ip, path)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+		 struct btree_path *path),
+	TP_ARGS(trans, caller_ip, path)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+		 struct btree_path *path),
+	TP_ARGS(trans, caller_ip, path)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+		 struct btree_path *path),
+	TP_ARGS(trans, caller_ip, path)
 );
 
 DEFINE_EVENT(transaction_event, transaction_restart_key_cache_upgrade,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
-	TP_ARGS(trans_fn, caller_ip)
+	TP_ARGS(trans, caller_ip)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+		 struct btree_path *path),
+	TP_ARGS(trans, caller_ip, path)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_path_intent,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_memory_allocation_failure,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+		 struct btree_path *path),
	TP_ARGS(trans, caller_ip, path)
 );
 
 TRACE_EVENT(trans_restart_would_deadlock,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 bool in_traverse_all,
 		 unsigned reason,
-		 enum btree_id have_btree_id,
-		 unsigned have_iter_type,
-		 struct bpos *have_pos,
-		 enum btree_id want_btree_id,
-		 unsigned want_iter_type,
+		 struct btree_path *have,
+		 struct btree_path *want,
 		 struct bpos *want_pos),
-	TP_ARGS(trans_fn, caller_ip, in_traverse_all, reason,
-		have_btree_id, have_iter_type, have_pos,
-		want_btree_id, want_iter_type, want_pos),
+	TP_ARGS(trans, caller_ip, reason,
		have, want, want_pos),
 
 	TP_STRUCT__entry(
 		__array(char,			trans_fn, 24	)
@@ -979,35 +953,24 @@ TRACE_EVENT(trans_restart_would_deadlock,
 		__field(u8,			in_traverse_all	)
 		__field(u8,			reason		)
 		__field(u8,			have_btree_id	)
-		__field(u8,			have_iter_type	)
+		__field(u8,			have_type	)
 		__field(u8,			want_btree_id	)
-		__field(u8,			want_iter_type	)
-		__field(u64,			have_pos_inode	)
-		__field(u64,			have_pos_offset	)
-		__field(u32,			have_pos_snapshot)
-		__field(u32,			want_pos_snapshot)
-		__field(u64,			want_pos_inode	)
-		__field(u64,			want_pos_offset	)
+		__field(u8,			want_type	)
+		TRACE_BPOS_entries(have_pos)
+		TRACE_BPOS_entries(want_pos)
 	),
 
 	TP_fast_assign(
-		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
 		__entry->caller_ip		= caller_ip;
-		__entry->in_traverse_all	= in_traverse_all;
+		__entry->in_traverse_all	= trans->in_traverse_all;
 		__entry->reason			= reason;
-		__entry->have_btree_id		= have_btree_id;
-		__entry->have_iter_type		= have_iter_type;
-		__entry->want_btree_id		= want_btree_id;
-		__entry->want_iter_type		= want_iter_type;
-		__entry->have_pos_inode		= have_pos->inode;
-		__entry->have_pos_offset	= have_pos->offset;
-		__entry->have_pos_snapshot	= have_pos->snapshot;
-		__entry->want_pos_inode		= want_pos->inode;
-		__entry->want_pos_offset	= want_pos->offset;
-		__entry->want_pos_snapshot	= want_pos->snapshot;
+		__entry->have_btree_id		= have->btree_id;
+		__entry->have_type		= have->cached;
+		__entry->want_btree_id		= want->btree_id;
+		__entry->want_type		= want->cached;
+		TRACE_BPOS_assign(have_pos, have->pos);
+		TRACE_BPOS_assign(want_pos, *want_pos);
 	),
 
 	TP_printk("%s %pS traverse_all %u because %u have %u:%u %llu:%llu:%u want %u:%u %llu:%llu:%u",
@@ -1016,37 +979,37 @@ TRACE_EVENT(trans_restart_would_deadlock,
 		  __entry->in_traverse_all,
 		  __entry->reason,
 		  __entry->have_btree_id,
-		  __entry->have_iter_type,
+		  __entry->have_type,
 		  __entry->have_pos_inode,
 		  __entry->have_pos_offset,
 		  __entry->have_pos_snapshot,
 		  __entry->want_btree_id,
-		  __entry->want_iter_type,
+		  __entry->want_type,
 		  __entry->want_pos_inode,
 		  __entry->want_pos_offset,
 		  __entry->want_pos_snapshot)
 );
 
 TRACE_EVENT(trans_restart_would_deadlock_write,
-	TP_PROTO(const char *trans_fn),
-	TP_ARGS(trans_fn),
+	TP_PROTO(struct btree_trans *trans),
+	TP_ARGS(trans),
 
 	TP_STRUCT__entry(
 		__array(char,			trans_fn, 24	)
 	),
 
 	TP_fast_assign(
-		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
 	),
 
 	TP_printk("%s", __entry->trans_fn)
 );
 
 TRACE_EVENT(trans_restart_mem_realloced,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
 		 unsigned long bytes),
-	TP_ARGS(trans_fn, caller_ip, bytes),
+	TP_ARGS(trans, caller_ip, bytes),
 
 	TP_STRUCT__entry(
 		__array(char,			trans_fn, 24	)
@@ -1055,7 +1018,7 @@ TRACE_EVENT(trans_restart_mem_realloced,
 	),
 
 	TP_fast_assign(
-		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
+		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
 		__entry->caller_ip	= caller_ip;
 		__entry->bytes		= bytes;
 	),
@@ -1067,32 +1030,28 @@ TRACE_EVENT(trans_restart_mem_realloced,
 );
 
 TRACE_EVENT(trans_restart_key_cache_key_realloced,
-	TP_PROTO(const char *trans_fn,
+	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos,
+		 struct btree_path *path,
 		 unsigned old_u64s,
 		 unsigned new_u64s),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos, old_u64s, new_u64s),
+	TP_ARGS(trans, caller_ip, path, old_u64s, new_u64s),
 
 	TP_STRUCT__entry(
 		__array(char,			trans_fn, 24	)
 		__field(unsigned long,		caller_ip	)
 		__field(enum btree_id,		btree_id	)
-		__field(u64,			inode		)
-		__field(u64,			offset		)
-		__field(u32,			snapshot	)
+		TRACE_BPOS_entries(pos)
 		__field(u32,			old_u64s	)
 		__field(u32,			new_u64s	)
 	),
 
 	TP_fast_assign(
-		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
-		__entry->caller_ip		= caller_ip;
-		__entry->btree_id		= btree_id;
-		__entry->inode			= pos->inode;
-		__entry->offset			= pos->offset;
-		__entry->snapshot		= pos->snapshot;
+		strlcpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+		__entry->caller_ip		= caller_ip;
+		__entry->btree_id		= path->btree_id;
+		TRACE_BPOS_assign(pos, path->pos);
 		__entry->old_u64s		= old_u64s;
 		__entry->new_u64s		= new_u64s;
 	),
@@ -1101,57 +1060,11 @@ TRACE_EVENT(trans_restart_key_cache_key_realloced,
 		  __entry->trans_fn,
 		  (void *) __entry->caller_ip,
 		  bch2_btree_ids[__entry->btree_id],
-		  __entry->inode,
-		  __entry->offset,
-		  __entry->snapshot,
-		  __entry->old_u64s,
-		  __entry->new_u64s)
-);
-
-TRACE_EVENT(btree_path_alloc,
-	TP_PROTO(const char *trans_fn,
-		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos,
-		 unsigned locks_want),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos, locks_want),
-
-	TP_STRUCT__entry(
-		__array(char,			trans_fn, 24	)
-		__field(unsigned long,		caller_ip	)
-		__field(u8,			btree_id	)
-		__field(u8,			locks_want	)
-		__field(u64,			pos_inode	)
-		__field(u64,			pos_offset	)
-		__field(u32,			pos_snapshot	)
-	),
-
-	TP_fast_assign(
-		strlcpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
-		__entry->caller_ip		= caller_ip;
-		__entry->btree_id		= btree_id;
-		__entry->locks_want		= locks_want;
-		__entry->pos_inode		= pos->inode;
-		__entry->pos_offset		= pos->offset;
-		__entry->pos_snapshot		= pos->snapshot;
-	),
-
-	TP_printk("%s %pS btree %u locks_want %u pos %llu:%llu:%u",
-		  __entry->trans_fn,
-		  (void *) __entry->caller_ip,
-		  __entry->btree_id,
-		  __entry->locks_want,
-		  __entry->pos_inode,
-		  __entry->pos_offset,
-		  __entry->pos_snapshot)
-);
-
-DEFINE_EVENT(transaction_restart_iter, btree_path_free,
-	TP_PROTO(const char *trans_fn,
-		 unsigned long caller_ip,
-		 enum btree_id btree_id,
-		 struct bpos *pos),
-	TP_ARGS(trans_fn, caller_ip, btree_id, pos)
+		  __entry->pos_inode,
+		  __entry->pos_offset,
+		  __entry->pos_snapshot,
+		  __entry->old_u64s,
+		  __entry->new_u64s)
 );
 
 #endif /* _TRACE_BCACHEFS_H */