bpf: add bpffs pretty print for cgroup local storage maps

Implement bpffs pretty printing for cgroup local storage maps
(both shared and per-cpu).
Output example (captured for tools/testing/selftests/bpf/netcnt_prog.c):

Shared:
  $ cat /sys/fs/bpf/map_2
  # WARNING!! The output is for debug purpose only
  # WARNING!! The output format will change
  {4294968594,1}: {9999,1039896}

Per-cpu:
  $ cat /sys/fs/bpf/map_1
  # WARNING!! The output is for debug purpose only
  # WARNING!! The output format will change
  {4294968594,1}: {
  	cpu0: {0,0,0,0,0}
  	cpu1: {0,0,0,0,0}
  	cpu2: {1,104,0,0,0}
  	cpu3: {0,0,0,0,0}
  }
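
For reference, here is a minimal sketch (not the actual selftest source) of how such a
map can be declared on the BPF program side so that key/value BTF is available to the
pretty printer. The map name and the value struct are illustrative only; the key must be
struct bpf_cgroup_storage_key from the uapi bpf header, and the BPF_ANNOTATE_KV_PAIR
helper from the selftests' bpf_helpers.h is assumed:

  /* Illustrative only: a shared cgroup storage map whose key is
   * struct bpf_cgroup_storage_key and whose value is a made-up
   * two-counter struct, so BTF for both types ends up in the object.
   */
  #include <linux/bpf.h>
  #include "bpf_helpers.h"

  struct net_cnt {
  	__u64 packets;
  	__u64 bytes;
  };

  struct bpf_map_def SEC("maps") netcnt = {
  	.type = BPF_MAP_TYPE_CGROUP_STORAGE,
  	.key_size = sizeof(struct bpf_cgroup_storage_key),
  	.value_size = sizeof(struct net_cnt),
  };

  BPF_ANNOTATE_KV_PAIR(netcnt, struct bpf_cgroup_storage_key,
  		     struct net_cnt);

Once the program is loaded and the map is pinned under /sys/fs/bpf/, cat on the pinned
file produces output of the shape shown above.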

Signed-off-by: Roman Gushchin <guro@fb.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>


@@ -47,6 +47,7 @@ void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
int btf_get_fd_by_id(u32 id);
u32 btf_id(const struct btf *btf);
bool btf_name_offset_valid(const struct btf *btf, u32 offset);
bool btf_type_is_reg_int(const struct btf_type *t, u32 expected_size);
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);


@@ -514,6 +514,28 @@ static bool btf_type_int_is_regular(const struct btf_type *t)
	return true;
}

/*
 * Check that given type is a regular int and has the expected size.
 */
bool btf_type_is_reg_int(const struct btf_type *t, u32 expected_size)
{
	u8 nr_bits, nr_bytes;
	u32 int_data;

	if (!btf_type_is_int(t))
		return false;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
	if (BITS_PER_BYTE_MASKED(nr_bits) ||
	    BTF_INT_OFFSET(int_data) ||
	    nr_bytes != expected_size)
		return false;

	return true;
}

__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{


@@ -1,11 +1,13 @@
//SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bug.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <uapi/linux/btf.h>

DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
@@ -308,6 +310,94 @@ static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
	return -EINVAL;
}

static int cgroup_storage_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	const struct btf_type *t;
	struct btf_member *m;
	u32 id, size;

	/* Key is expected to be of struct bpf_cgroup_storage_key type,
	 * which is:
	 * struct bpf_cgroup_storage_key {
	 *	__u64 cgroup_inode_id;
	 *	__u32 attach_type;
	 * };
	 */

	/*
	 * Key_type must be a structure with two fields.
	 */
	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
	    BTF_INFO_VLEN(key_type->info) != 2)
		return -EINVAL;

	/*
	 * The first field must be a 64 bit integer at 0 offset.
	 */
	m = (struct btf_member *)(key_type + 1);
	if (m->offset)
		return -EINVAL;
	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id);
	if (!t || !btf_type_is_reg_int(t, size))
		return -EINVAL;

	/*
	 * The second field must be a 32 bit integer at 64 bit offset.
	 */
	m++;
	if (m->offset != offsetof(struct bpf_cgroup_storage_key, attach_type) *
			 BITS_PER_BYTE)
		return -EINVAL;
	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, attach_type);
	if (!t || !btf_type_is_reg_int(t, size))
		return -EINVAL;

	return 0;
}

static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key,
					 struct seq_file *m)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage_key *key = _key;
	struct bpf_cgroup_storage *storage;
	int cpu;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map_to_storage(map), key, false);
	if (!storage) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		seq_puts(m, ": ");
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  &READ_ONCE(storage->buf)->data[0], m);
		seq_puts(m, "\n");
	} else {
		seq_puts(m, ": {\n");
		for_each_possible_cpu(cpu) {
			seq_printf(m, "\tcpu%d: ", cpu);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  per_cpu_ptr(storage->percpu_buf, cpu),
					  m);
			seq_puts(m, "\n");
		}
		seq_puts(m, "}\n");
	}
	rcu_read_unlock();
}

const struct bpf_map_ops cgroup_storage_map_ops = {
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
@@ -315,7 +405,8 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
	.map_lookup_elem = cgroup_storage_lookup_elem,
	.map_update_elem = cgroup_storage_update_elem,
	.map_delete_elem = cgroup_storage_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_check_btf = cgroup_storage_check_btf,
	.map_seq_show_elem = cgroup_storage_seq_show_elem,
};
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)