bcachefs: Tracepoint improvements

Delete some obsolete tracepoints, organize alloc tracepoints better,
and make a few tracepoints more consistent.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Author: Kent Overstreet, 2022-04-17 18:06:31 -04:00 (committed by Kent Overstreet)
Parent: 8cc052db63
Commit: 1f93726e63
8 changed files with 112 additions and 131 deletions
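Background for the renames in the trace header below: bcachefs tracepoints use the kernel's trace-event macros, where DECLARE_EVENT_CLASS defines the record layout and output format once and each DEFINE_EVENT attaches a named event to that class, so renaming a class (here, transaction_restart to transaction_event) does not change the event names exposed in the bcachefs events directory in tracefs. A minimal sketch of the pattern, illustrative only and not verbatim from this commit:

/* Sketch of the DECLARE_EVENT_CLASS/DEFINE_EVENT pattern used by the
 * tracepoints touched in this commit; field layout is an assumption,
 * only the prototype and printk format are taken from the diff below.
 */
DECLARE_EVENT_CLASS(transaction_event,
	TP_PROTO(const char *trans_fn, unsigned long caller_ip),
	TP_ARGS(trans_fn, caller_ip),

	TP_STRUCT__entry(
		__array(char,		trans_fn, 24	)
		__field(unsigned long,	caller_ip	)
	),

	TP_fast_assign(
		/* capture the transaction function name and caller address */
		strscpy(__entry->trans_fn, trans_fn, sizeof(__entry->trans_fn));
		__entry->caller_ip = caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);

/* Each event reuses the class layout; the new transaction_commit event
 * defined on this class is what trace_transaction_commit() fires from
 * __bch2_trans_commit() in the hunk below.
 */
DEFINE_EVENT(transaction_event, transaction_commit,
	TP_PROTO(const char *trans_fn, unsigned long caller_ip),
	TP_ARGS(trans_fn, caller_ip)
);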


@@ -1066,7 +1066,7 @@ static void bch2_do_discards_work(struct work_struct *work)
percpu_ref_put(&c->writes);
trace_do_discards(c, seen, open, need_journal_commit, discarded, ret);
trace_discard_buckets(c, seen, open, need_journal_commit, discarded, ret);
}
void bch2_do_discards(struct bch_fs *c)
@@ -1130,6 +1130,10 @@ static int invalidate_one_bucket(struct btree_trans *trans, struct bch_dev *ca)
ret = bch2_trans_update(trans, &alloc_iter, &a->k_i,
BTREE_TRIGGER_BUCKET_INVALIDATE);
if (ret)
goto out;
trace_invalidate_bucket(c, a->k.p.inode, a->k.p.offset);
out:
bch2_trans_iter_exit(trans, &alloc_iter);
bch2_trans_iter_exit(trans, &lru_iter);


@@ -1742,12 +1742,10 @@ static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
*/
int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
{
u64 start_time = local_clock();
unsigned iter = 0;
int ret;
lockdep_assert_held(&c->state_lock);
trace_gc_start(c);
down_write(&c->gc_lock);
@@ -1840,9 +1838,6 @@ int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
up_write(&c->gc_lock);
trace_gc_end(c);
bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
/*
* At startup, allocations can happen directly instead of via the
* allocator thread - issue wakeup in case they blocked on gc_lock:
@@ -1979,6 +1974,7 @@ int bch2_gc_gens(struct bch_fs *c)
if (!mutex_trylock(&c->gc_gens_lock))
return 0;
trace_gc_gens_start(c);
down_read(&c->gc_lock);
bch2_trans_init(&trans, c, 0, 0);
@@ -2030,6 +2026,7 @@ int bch2_gc_gens(struct bch_fs *c)
c->gc_count++;
bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
trace_gc_gens_end(c);
err:
for_each_member_device(ca, c, i) {
kvfree(ca->oldest_gen);


@@ -1812,10 +1812,8 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
as = bch2_btree_update_start(trans, iter->path, b->c.level,
false, flags);
ret = PTR_ERR_OR_ZERO(as);
if (ret) {
trace_btree_gc_rewrite_node_fail(c, b);
if (ret)
goto out;
}
bch2_btree_interior_update_will_free_node(as, b);
@@ -1825,7 +1823,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
bch2_btree_build_aux_trees(n);
six_unlock_write(&n->c.lock);
trace_btree_gc_rewrite_node(c, b);
trace_btree_rewrite(c, b);
bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);


@@ -1169,6 +1169,8 @@ int __bch2_trans_commit(struct btree_trans *trans)
if (ret)
goto err;
trace_transaction_commit(trans->fn, _RET_IP_);
out:
bch2_journal_preres_put(&c->journal, &trans->journal_preres);


@@ -600,9 +600,6 @@ int bch2_mark_alloc(struct btree_trans *trans,
bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
return ret;
}
trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
old_a.cached_sectors);
}
return 0;


@@ -593,7 +593,7 @@ static u64 journal_seq_to_flush(struct journal *j)
* 512 journal entries or 25% of all journal buckets, then
* journal_next_bucket() should not stall.
*/
static int __bch2_journal_reclaim(struct journal *j, bool direct)
static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
bool kthread = (current->flags & PF_KTHREAD) != 0;
@@ -639,8 +639,10 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct)
if (fifo_free(&j->pin) <= 32)
min_nr = 1;
trace_journal_reclaim_start(c,
min_nr,
min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
trace_journal_reclaim_start(c, direct, kicked,
min_nr, min_key_cache,
j->prereserved.reserved,
j->prereserved.remaining,
atomic_read(&c->btree_cache.dirty),
@@ -648,8 +650,6 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct)
atomic_long_read(&c->btree_key_cache.nr_dirty),
atomic_long_read(&c->btree_key_cache.nr_keys));
min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
nr_flushed = journal_flush_pins(j, seq_to_flush,
min_nr, min_key_cache);
@@ -670,7 +670,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct)
int bch2_journal_reclaim(struct journal *j)
{
return __bch2_journal_reclaim(j, true);
return __bch2_journal_reclaim(j, true, true);
}
static int bch2_journal_reclaim_thread(void *arg)
@@ -686,10 +686,12 @@ static int bch2_journal_reclaim_thread(void *arg)
j->last_flushed = jiffies;
while (!ret && !kthread_should_stop()) {
bool kicked = j->reclaim_kicked;
j->reclaim_kicked = false;
mutex_lock(&j->reclaim_lock);
ret = __bch2_journal_reclaim(j, false);
ret = __bch2_journal_reclaim(j, false, kicked);
mutex_unlock(&j->reclaim_lock);
now = jiffies;


@@ -597,7 +597,7 @@ static int bch2_move_extent(struct btree_trans *trans,
err_free:
kfree(io);
err:
trace_move_alloc_fail(k.k);
trace_move_alloc_mem_fail(k.k);
return ret;
}


@@ -142,17 +142,21 @@ DEFINE_EVENT(bio, journal_write,
);
TRACE_EVENT(journal_reclaim_start,
TP_PROTO(struct bch_fs *c, u64 min_nr,
TP_PROTO(struct bch_fs *c, bool direct, bool kicked,
u64 min_nr, u64 min_key_cache,
u64 prereserved, u64 prereserved_total,
u64 btree_cache_dirty, u64 btree_cache_total,
u64 btree_key_cache_dirty, u64 btree_key_cache_total),
TP_ARGS(c, min_nr, prereserved, prereserved_total,
TP_ARGS(c, direct, kicked, min_nr, min_key_cache, prereserved, prereserved_total,
btree_cache_dirty, btree_cache_total,
btree_key_cache_dirty, btree_key_cache_total),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(bool, direct )
__field(bool, kicked )
__field(u64, min_nr )
__field(u64, min_key_cache )
__field(u64, prereserved )
__field(u64, prereserved_total )
__field(u64, btree_cache_dirty )
@@ -163,7 +167,10 @@ TRACE_EVENT(journal_reclaim_start,
TP_fast_assign(
__entry->dev = c->dev;
__entry->direct = direct;
__entry->kicked = kicked;
__entry->min_nr = min_nr;
__entry->min_key_cache = min_key_cache;
__entry->prereserved = prereserved;
__entry->prereserved_total = prereserved_total;
__entry->btree_cache_dirty = btree_cache_dirty;
@@ -172,9 +179,12 @@ TRACE_EVENT(journal_reclaim_start,
__entry->btree_key_cache_total = btree_key_cache_total;
),
TP_printk("%d,%d min %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu",
TP_printk("%d,%d direct %u kicked %u min %llu key cache %llu prereserved %llu/%llu btree cache %llu/%llu key cache %llu/%llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->direct,
__entry->kicked,
__entry->min_nr,
__entry->min_key_cache,
__entry->prereserved,
__entry->prereserved_total,
__entry->btree_cache_dirty,
@@ -197,45 +207,13 @@ TRACE_EVENT(journal_reclaim_finish,
__entry->nr_flushed = nr_flushed;
),
TP_printk("%d%d flushed %llu",
TP_printk("%d,%d flushed %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->nr_flushed)
);
/* allocator: */
TRACE_EVENT(do_discards,
TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
u64 need_journal_commit, u64 discarded, int ret),
TP_ARGS(c, seen, open, need_journal_commit, discarded, ret),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(u64, seen )
__field(u64, open )
__field(u64, need_journal_commit )
__field(u64, discarded )
__field(int, ret )
),
TP_fast_assign(
__entry->dev = c->dev;
__entry->seen = seen;
__entry->open = open;
__entry->need_journal_commit = need_journal_commit;
__entry->discarded = discarded;
__entry->ret = ret;
),
TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu ret %i",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->seen,
__entry->open,
__entry->need_journal_commit,
__entry->discarded,
__entry->ret)
);
/* bset.c: */
DEFINE_EVENT(bpos, bkey_pack_pos_fail,
@@ -370,6 +348,11 @@ DEFINE_EVENT(btree_node, btree_merge,
TP_ARGS(c, b)
);
DEFINE_EVENT(btree_node, btree_rewrite,
TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
DEFINE_EVENT(btree_node, btree_set_root,
TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
@@ -443,79 +426,18 @@ TRACE_EVENT(btree_node_relock_fail,
/* Garbage collection */
DEFINE_EVENT(btree_node, btree_gc_rewrite_node,
TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
DEFINE_EVENT(btree_node, btree_gc_rewrite_node_fail,
TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
DEFINE_EVENT(bch_fs, gc_start,
DEFINE_EVENT(bch_fs, gc_gens_start,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(bch_fs, gc_end,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(bch_fs, gc_cannot_inc_gens,
DEFINE_EVENT(bch_fs, gc_gens_end,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
/* Allocator */
TRACE_EVENT(alloc_scan,
TP_PROTO(struct bch_dev *ca, u64 found, u64 inc_gen, u64 inc_gen_skipped),
TP_ARGS(ca, found, inc_gen, inc_gen_skipped),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(u64, found )
__field(u64, inc_gen )
__field(u64, inc_gen_skipped )
),
TP_fast_assign(
__entry->dev = ca->dev;
__entry->found = found;
__entry->inc_gen = inc_gen;
__entry->inc_gen_skipped = inc_gen_skipped;
),
TP_printk("%d,%d found %llu inc_gen %llu inc_gen_skipped %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->found, __entry->inc_gen, __entry->inc_gen_skipped)
);
TRACE_EVENT(invalidate,
TP_PROTO(struct bch_dev *ca, u64 offset, unsigned sectors),
TP_ARGS(ca, offset, sectors),
TP_STRUCT__entry(
__field(unsigned, sectors )
__field(dev_t, dev )
__field(__u64, offset )
),
TP_fast_assign(
__entry->dev = ca->dev;
__entry->offset = offset,
__entry->sectors = sectors;
),
TP_printk("invalidated %u sectors at %d,%d sector=%llu",
__entry->sectors,
MAJOR(__entry->dev),
MINOR(__entry->dev),
__entry->offset)
);
DECLARE_EVENT_CLASS(bucket_alloc,
TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
u64 avail,
@@ -587,6 +509,59 @@ DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
TP_ARGS(ca, alloc_reserve, avail, seen, open, need_journal_commit, nouse, nonblocking, ret)
);
TRACE_EVENT(discard_buckets,
TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
u64 need_journal_commit, u64 discarded, int ret),
TP_ARGS(c, seen, open, need_journal_commit, discarded, ret),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(u64, seen )
__field(u64, open )
__field(u64, need_journal_commit )
__field(u64, discarded )
__field(int, ret )
),
TP_fast_assign(
__entry->dev = c->dev;
__entry->seen = seen;
__entry->open = open;
__entry->need_journal_commit = need_journal_commit;
__entry->discarded = discarded;
__entry->ret = ret;
),
TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu ret %i",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->seen,
__entry->open,
__entry->need_journal_commit,
__entry->discarded,
__entry->ret)
);
TRACE_EVENT(invalidate_bucket,
TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket),
TP_ARGS(c, dev, bucket),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(u32, dev_idx )
__field(u64, bucket )
),
TP_fast_assign(
__entry->dev = c->dev;
__entry->dev_idx = dev;
__entry->bucket = bucket;
),
TP_printk("%d:%d invalidated %u:%llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->dev_idx, __entry->bucket)
);
/* Moving IO */
DEFINE_EVENT(bkey, move_extent,
@@ -594,7 +569,7 @@ DEFINE_EVENT(bkey, move_extent,
TP_ARGS(k)
);
DEFINE_EVENT(bkey, move_alloc_fail,
DEFINE_EVENT(bkey, move_alloc_mem_fail,
TP_PROTO(const struct bkey *k),
TP_ARGS(k)
);
@ -678,7 +653,7 @@ TRACE_EVENT(copygc_wait,
__entry->wait_amount, __entry->until)
);
DECLARE_EVENT_CLASS(transaction_restart,
DECLARE_EVENT_CLASS(transaction_event,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip),
TP_ARGS(trans_fn, caller_ip),
@@ -696,55 +671,61 @@ DECLARE_EVENT_CLASS(transaction_restart,
TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
DEFINE_EVENT(transaction_restart, transaction_restart_ip,
DEFINE_EVENT(transaction_event, transaction_commit,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip),
TP_ARGS(trans_fn, caller_ip)
);
DEFINE_EVENT(transaction_restart, trans_blocked_journal_reclaim,
DEFINE_EVENT(transaction_event, transaction_restart_ip,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip),
TP_ARGS(trans_fn, caller_ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_journal_res_get,
DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip),
TP_ARGS(trans_fn, caller_ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_journal_preres_get,
DEFINE_EVENT(transaction_event, trans_restart_journal_res_get,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip),
TP_ARGS(trans_fn, caller_ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_journal_reclaim,
DEFINE_EVENT(transaction_event, trans_restart_journal_preres_get,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip),
TP_ARGS(trans_fn, caller_ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_fault_inject,
DEFINE_EVENT(transaction_event, trans_restart_journal_reclaim,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip),
TP_ARGS(trans_fn, caller_ip)
);
DEFINE_EVENT(transaction_restart, trans_traverse_all,
DEFINE_EVENT(transaction_event, trans_restart_fault_inject,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip),
TP_ARGS(trans_fn, caller_ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_mark_replicas,
DEFINE_EVENT(transaction_event, trans_traverse_all,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip),
TP_ARGS(trans_fn, caller_ip)
);
DEFINE_EVENT(transaction_restart, trans_restart_key_cache_raced,
DEFINE_EVENT(transaction_event, trans_restart_mark_replicas,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip),
TP_ARGS(trans_fn, caller_ip)
);
DEFINE_EVENT(transaction_event, trans_restart_key_cache_raced,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip),
TP_ARGS(trans_fn, caller_ip)