mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-14 06:35:12 +00:00
4ab67149f3
Extend bpf_mem_alloc to cache free list of fixed size per-cpu allocations. Once such cache is created bpf_mem_cache_alloc() will return per-cpu objects. bpf_mem_cache_free() will free them back into global per-cpu pool after observing RCU grace period. per-cpu flavor of bpf_mem_alloc is going to be used by per-cpu hash maps. The free list cache consists of tuples { llist_node, per-cpu pointer } Unlike alloc_percpu() that returns per-cpu pointer the bpf_mem_cache_alloc() returns a pointer to per-cpu pointer and bpf_mem_cache_free() expects to receive it back. Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com> Acked-by: Andrii Nakryiko <andrii@kernel.org> Link: https://lore.kernel.org/bpf/20220902211058.60789-11-alexei.starovoitov@gmail.com
26 lines
807 B
C
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#ifndef _BPF_MEM_ALLOC_H
#define _BPF_MEM_ALLOC_H
#include <linux/compiler_types.h>

/* Opaque cache types; defined in the allocator implementation. */
struct bpf_mem_cache;
struct bpf_mem_caches;

/*
 * BPF memory allocator handle.
 *
 * Exactly one of the two members is used, depending on how the
 * allocator was initialized (see bpf_mem_alloc_init() below):
 *  - @caches: per-cpu set of size-bucketed caches (variable-size mode)
 *  - @cache:  per-cpu cache of one fixed object size (fixed-size mode)
 * NOTE(review): which member is active for a given init mode is inferred
 * from the member names/types — confirm against the .c implementation.
 */
struct bpf_mem_alloc {
	struct bpf_mem_caches __percpu *caches;
	struct bpf_mem_cache __percpu *cache;
};

/*
 * Initialize @ma. @size selects a fixed object size for the cache;
 * @percpu selects the per-cpu flavor, in which case bpf_mem_cache_alloc()
 * returns a pointer to a per-cpu pointer and bpf_mem_cache_free() expects
 * to receive that same pointer back (freed into the global per-cpu pool
 * after an RCU grace period).
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);

/* kmalloc/kfree equivalent: */
void *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size);
void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);

/* kmem_cache_alloc/free equivalent: */
void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);

#endif /* _BPF_MEM_ALLOC_H */
|