bpf: Re-enable unit_size checking for global per-cpu allocator

With pcpu_alloc_size() in place, check whether the size of the dynamic
per-cpu area matches unit_size.

Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20231020133202.4043247-4-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

@@ -491,21 +491,17 @@ static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
 	struct llist_node *first;
 	unsigned int obj_size;
 
-	/* For per-cpu allocator, the size of free objects in free list doesn't
-	 * match with unit_size and now there is no way to get the size of
-	 * per-cpu pointer saved in free object, so just skip the checking.
-	 */
-	if (c->percpu_size)
-		return 0;
-
 	first = c->free_llist.first;
 	if (!first)
 		return 0;
 
-	obj_size = ksize(first);
+	if (c->percpu_size)
+		obj_size = pcpu_alloc_size(((void **)first)[1]);
+	else
+		obj_size = ksize(first);
 	if (obj_size != c->unit_size) {
-		WARN_ONCE(1, "bpf_mem_cache[%u]: unexpected object size %u, expect %u\n",
-			  idx, obj_size, c->unit_size);
+		WARN_ONCE(1, "bpf_mem_cache[%u]: percpu %d, unexpected object size %u, expect %u\n",
+			  idx, c->percpu_size, obj_size, c->unit_size);
 		return -EINVAL;
 	}
 	return 0;
@@ -973,6 +969,12 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
 	return !ret ? NULL : ret + LLIST_NODE_SZ;
 }
 
+/* The alignment of dynamic per-cpu area is 8, so c->unit_size and the
+ * actual size of dynamic per-cpu area will always be matched and there is
+ * no need to adjust size_index for per-cpu allocation. However for the
+ * simplicity of the implementation, use an unified size_index for both
+ * kmalloc and per-cpu allocation.
+ */
 static __init int bpf_mem_cache_adjust_size(void)
 {
 	unsigned int size;
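
For context, a minimal sketch (not kernel source, the function name is made up
for illustration) of the check the first hunk re-enables: a free object in a
per-cpu bpf_mem_cache stores the dynamic per-cpu pointer immediately after its
llist_node, so pcpu_alloc_size() can recover the size of that area and compare
it against unit_size. Only pcpu_alloc_size(), ksize() and the bpf_mem_cache
fields shown in the diff above are taken from the patch itself.

	/* Illustrative sketch of the re-enabled size check.
	 *
	 * Layout of a free per-cpu object in the free list:
	 *   [ llist_node ][ void __percpu *ptr ]
	 * so the saved per-cpu pointer is ((void **)first)[1].
	 */
	static int sketch_check_obj_size(struct bpf_mem_cache *c)
	{
		struct llist_node *first = c->free_llist.first;
		unsigned int obj_size;

		if (!first)
			return 0;

		if (c->percpu_size)
			/* size of the dynamic per-cpu area behind the saved pointer */
			obj_size = pcpu_alloc_size(((void **)first)[1]);
		else
			/* plain kmalloc object: ask the slab allocator */
			obj_size = ksize(first);

		return obj_size == c->unit_size ? 0 : -EINVAL;
	}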