diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index e6a81af516f6..01dda2f65d36 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -928,6 +928,39 @@ static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
 }
 #endif // HAVE_LIBBPF_SUPPORT
 
+static int write_bpf_btf(struct feat_fd *ff,
+			 struct perf_evlist *evlist __maybe_unused)
+{
+	struct perf_env *env = &ff->ph->env;
+	struct rb_root *root;
+	struct rb_node *next;
+	int ret;
+
+	down_read(&env->bpf_progs.lock);
+
+	ret = do_write(ff, &env->bpf_progs.btfs_cnt,
+		       sizeof(env->bpf_progs.btfs_cnt));
+
+	if (ret < 0)
+		goto out;
+
+	root = &env->bpf_progs.btfs;
+	next = rb_first(root);
+	while (next) {
+		struct btf_node *node;
+
+		node = rb_entry(next, struct btf_node, rb_node);
+		next = rb_next(&node->rb_node);
+		ret = do_write(ff, &node->id,
+			       sizeof(u32) * 2 + node->data_size);
+		if (ret < 0)
+			goto out;
+	}
+out:
+	up_read(&env->bpf_progs.lock);
+	return ret;
+}
+
 static int cpu_cache_level__sort(const void *a, const void *b)
 {
 	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
@@ -1442,6 +1475,28 @@ static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
 	up_read(&env->bpf_progs.lock);
 }
 
+static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
+{
+	struct perf_env *env = &ff->ph->env;
+	struct rb_root *root;
+	struct rb_node *next;
+
+	down_read(&env->bpf_progs.lock);
+
+	root = &env->bpf_progs.btfs;
+	next = rb_first(root);
+
+	while (next) {
+		struct btf_node *node;
+
+		node = rb_entry(next, struct btf_node, rb_node);
+		next = rb_next(&node->rb_node);
+		fprintf(fp, "# btf info of id %u\n", node->id);
+	}
+
+	up_read(&env->bpf_progs.lock);
+}
+
 static void free_event_desc(struct perf_evsel *events)
 {
 	struct perf_evsel *evsel;
@@ -2564,6 +2619,49 @@ static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data _
 }
 #endif // HAVE_LIBBPF_SUPPORT
 
+static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
+{
+	struct perf_env *env = &ff->ph->env;
+	u32 count, i;
+
+	if (ff->ph->needs_swap) {
+		pr_warning("interpreting btf from systems with endianity is not yet supported\n");
+		return 0;
+	}
+
+	if (do_read_u32(ff, &count))
+		return -1;
+
+	down_write(&env->bpf_progs.lock);
+
+	for (i = 0; i < count; ++i) {
+		struct btf_node *node;
+		u32 id, data_size;
+
+		if (do_read_u32(ff, &id))
+			return -1;
+		if (do_read_u32(ff, &data_size))
+			return -1;
+
+		node = malloc(sizeof(struct btf_node) + data_size);
+		if (!node)
+			return -1;
+
+		node->id = id;
+		node->data_size = data_size;
+
+		if (__do_read(ff, node->data, data_size)) {
+			free(node);
+			return -1;
+		}
+
+		perf_env__insert_btf(env, node);
+	}
+
+	up_write(&env->bpf_progs.lock);
+	return 0;
+}
+
 struct feature_ops {
 	int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
 	void (*print)(struct feat_fd *ff, FILE *fp);
@@ -2625,7 +2723,8 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
 	FEAT_OPR(MEM_TOPOLOGY,	mem_topology,	true),
 	FEAT_OPR(CLOCKID,	clockid,	false),
 	FEAT_OPN(DIR_FORMAT,	dir_format,	false),
-	FEAT_OPR(BPF_PROG_INFO, bpf_prog_info,  false)
+	FEAT_OPR(BPF_PROG_INFO, bpf_prog_info,  false),
+	FEAT_OPR(BPF_BTF,	bpf_btf,	false),
 };
 
 struct header_print_data {
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 1dc85f0f895d..386da49e1bfa 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -41,6 +41,7 @@ enum {
 	HEADER_CLOCKID,
 	HEADER_DIR_FORMAT,
 	HEADER_BPF_PROG_INFO,
+	HEADER_BPF_BTF,
 	HEADER_LAST_FEATURE,
 	HEADER_FEAT_BITS	= 256,
 };
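
Not part of the patch: a minimal, self-contained sketch of a reader for the layout that write_bpf_btf() above serializes into the BPF_BTF feature section, namely a u32 record count followed by one record per BTF object of { u32 id; u32 data_size; data_size bytes of raw BTF }. The function name dump_bpf_btf_section() and the assumption that the whole section sits in a host-byte-order memory buffer are illustrative only.

/* Hypothetical standalone parser for the BPF_BTF feature section layout. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int dump_bpf_btf_section(const unsigned char *buf, size_t len)
{
	uint32_t count, id, data_size;
	size_t off = 0;

	/* u32 record count, as written from env->bpf_progs.btfs_cnt */
	if (len < sizeof(count))
		return -1;
	memcpy(&count, buf + off, sizeof(count));
	off += sizeof(count);

	for (uint32_t i = 0; i < count; i++) {
		/* each record starts with u32 id and u32 data_size */
		if (len - off < 2 * sizeof(uint32_t))
			return -1;
		memcpy(&id, buf + off, sizeof(id));
		memcpy(&data_size, buf + off + sizeof(id), sizeof(data_size));
		off += 2 * sizeof(uint32_t);

		/* followed by data_size bytes of raw BTF for this id */
		if (len - off < data_size)
			return -1;
		printf("btf id %u: %u bytes\n", id, data_size);
		off += data_size;
	}
	return 0;
}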